linux/drivers/target/target_core_sbc.c
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}
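
/*
 * Illustrative sketch (not part of the original driver): the shift/mask
 * sequence above packs the 8-byte READ CAPACITY (10) payload, a big-endian
 * RETURNED LOGICAL BLOCK ADDRESS followed by a big-endian BLOCK LENGTH IN
 * BYTES. The <asm/unaligned.h> helpers already included by this file
 * express the same packing more compactly:
 */
static inline void __example_fill_readcap10(unsigned char *buf, u32 blocks,
					    u32 block_size)
{
	put_unaligned_be32(blocks, &buf[0]);		/* last LBA, bytes 0-3 */
	put_unaligned_be32(block_size, &buf[4]);	/* block length, bytes 4-7 */
}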

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
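
/*
 * Worked example (added for clarity, values per SBC-3): with Type 1
 * protection, pi_prot_type == 1, so byte 12 above becomes
 * ((1 - 1) << 1) | 0x1 = 0x01, i.e. PROT_EN=1 and P_TYPE=0; with Type 3,
 * ((3 - 1) << 1) | 0x1 = 0x05, i.e. PROT_EN=1 and P_TYPE=2. P_TYPE always
 * reports the protection type minus one.
 */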

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when a non-zero value is supplied, otherwise
	 * calculate the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
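
/*
 * Illustrative sketch (not part of the original driver): each open-coded
 * shift/OR helper above decodes a fixed-offset big-endian CDB field, so it
 * could equally be written with the <asm/unaligned.h> helpers this file
 * already includes; e.g. the following is equivalent to transport_lba_64(),
 * which assembles bytes cdb[2]..cdb[9]:
 */
static inline unsigned long long __example_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}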

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
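
/*
 * For reference (bit positions per SBC-3, matching the masks tested in
 * sbc_setup_write_same() above), the WRITE SAME flags byte decodes as:
 *
 *   bit 4 (0x10)  ANCHOR - always rejected since we report ANC_SUP=0
 *   bit 3 (0x08)  UNMAP  - routed to ops->execute_write_same_unmap
 *   bit 2 (0x04)  PBDATA - not supported
 *   bit 1 (0x02)  LBDATA - not supported
 */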

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
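
/*
 * Worked example (added for clarity): for a single byte, step 3 above with
 * a data-out byte of 0xf0 and on-disk contents of 0x0f leaves
 * 0xf0 ^ 0x0f = 0xff in the bidi read buffer, which is exactly what the
 * per-byte XOR over cmd->t_bidi_data_sg computes.
 */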

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
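
/*
 * Flow summary (descriptive comment, added for clarity): COMPARE AND WRITE
 * is emulated in two phases serialized by dev->caw_sem:
 *
 *  1) sbc_compare_and_write() submits a READ of the target range into
 *     cmd->t_bidi_data_sg (DMA_FROM_DEVICE);
 *  2) compare_and_write_callback() memcmp()s the read payload against the
 *     verify half of the data-out buffer and, on a match, rewires
 *     cmd->execute_cmd to sbc_execute_rw() to submit the write half;
 *  3) compare_and_write_post(), or the callback on MISCOMPARE or failure,
 *     releases dev->caw_sem.
 */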

static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
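
/*
 * For reference (per SBC-3, matching the switches above): the 3-bit
 * RDPROTECT/WRPROTECT value selects which DIF fields get checked:
 *
 *   000b       write: no checks (fresh PI is inserted);
 *              read: GUARD, plus REFTAG for Type 1
 *   001b/101b  GUARD, plus REFTAG for Type 1
 *   010b       REFTAG only (Type 1)
 *   011b       no checking
 *   100b       GUARD only
 */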

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * When protection information is carried over the wire, modify the
	 * command data length to describe the pure data; the actual transfer
	 * length is the data length plus the protection length.
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return true;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
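
/*
 * Worked example (added for clarity): a READ (10) CDB of
 *
 *   28 00 00 00 00 08 00 00 10 00
 *
 * parses above as t_task_lba = 0x8 (transport_lba_32() on bytes 2-5) and
 * sectors = 0x10 (transport_get_sectors_10() on bytes 7-8), so with a
 * 512-byte block_size the final size check sees 16 * 512 = 8192 bytes.
 */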

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
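
/*
 * For reference (layout per SBC-3, matching the parsing above), the UNMAP
 * parameter list walked by sbc_execute_unmap() is:
 *
 *   bytes 0-1  UNMAP DATA LENGTH (dl)
 *   bytes 2-3  UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *   bytes 4-7  reserved
 *   bytes 8+   16-byte block descriptors, each holding an 8-byte UNMAP
 *              LOGICAL BLOCK ADDRESS, a 4-byte NUMBER OF LOGICAL BLOCKS,
 *              and 4 reserved bytes
 */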

void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}
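
/*
 * For reference (assumed from the usage above and the T10 PI format):
 * struct se_dif_v1_tuple is the 8-byte protection tuple stored per
 * logical block in cmd->t_prot_sg:
 *
 *   __be16 guard_tag  - CRC16 (crc_t10dif) over the block's data
 *   __be16 app_tag    - application tag; 0xffff disables checking on read
 *   __be32 ref_tag    - lower 32 bits of the LBA for Type 1
 */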

static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
			be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);