linux/drivers/target/target_core_xcopy.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*******************************************************************************
   3 * Filename: target_core_xcopy.c
   4 *
   5 * This file contains support for SPC-4 Extended-Copy offload with generic
   6 * TCM backends.
   7 *
   8 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
   9 *
  10 * Author:
  11 * Nicholas A. Bellinger <nab@daterainc.com>
  12 *
  13 ******************************************************************************/
  14
  15#include <linux/slab.h>
  16#include <linux/spinlock.h>
  17#include <linux/list.h>
  18#include <linux/configfs.h>
  19#include <linux/ratelimit.h>
  20#include <scsi/scsi_proto.h>
  21#include <asm/unaligned.h>
  22
  23#include <target/target_core_base.h>
  24#include <target/target_core_backend.h>
  25#include <target/target_core_fabric.h>
  26
  27#include "target_core_internal.h"
  28#include "target_core_pr.h"
  29#include "target_core_ua.h"
  30#include "target_core_xcopy.h"
  31
  32static struct workqueue_struct *xcopy_wq = NULL;
  33
  34static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
  35
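     /*
      * Generate the 16-byte NAA IEEE Registered Extended designator (type 0x6)
      * for @dev into @buf. This is the WWN format used below to match the
      * CSCD descriptors of an EXTENDED COPY against local backend devices.
      */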
  36static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
  37{
  38        int off = 0;
  39
  40        buf[off++] = (0x6 << 4);
  41        buf[off++] = 0x01;
  42        buf[off++] = 0x40;
  43        buf[off] = (0x5 << 4);
  44
  45        spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
  46        return 0;
  47}
  48
  49struct xcopy_dev_search_info {
  50        const unsigned char *dev_wwn;
  51        struct se_device *found_dev;
  52};
  53
  54static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
  55                                              void *data)
  56{
  57        struct xcopy_dev_search_info *info = data;
  58        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
  59        int rc;
  60
  61        if (!se_dev->dev_attrib.emulate_3pc)
  62                return 0;
  63
  64        memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
  65        target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
  66
  67        rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
  68        if (rc != 0)
  69                return 0;
  70
  71        info->found_dev = se_dev;
  72        pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
  73
  74        rc = target_depend_item(&se_dev->dev_group.cg_item);
  75        if (rc != 0) {
  76                pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
  77                       rc, se_dev);
  78                return rc;
  79        }
  80
  81        pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
  82                 se_dev, &se_dev->dev_group);
  83        return 1;
  84}
  85
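     /*
      * Walk all registered backend devices and return the one whose generated
      * NAA designator matches @dev_wwn. On success the matched device has a
      * configfs dependency taken on it by the iterator above; the dependency
      * is dropped later via xcopy_pt_undepend_remotedev().
      */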
  86static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
  87                                        struct se_device **found_dev)
  88{
  89        struct xcopy_dev_search_info info;
  90        int ret;
  91
  92        memset(&info, 0, sizeof(info));
  93        info.dev_wwn = dev_wwn;
  94
  95        ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
  96        if (ret == 1) {
  97                *found_dev = info.found_dev;
  98                return 0;
  99        } else {
 100                pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
 101                return -EINVAL;
 102        }
 103}
 104
 105static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
 106                                unsigned char *p, unsigned short cscd_index)
 107{
 108        unsigned char *desc = p;
 109        unsigned short ript;
 110        u8 desig_len;
 111        /*
 112         * Extract RELATIVE INITIATOR PORT IDENTIFIER
 113         */
 114        ript = get_unaligned_be16(&desc[2]);
 115        pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
 116        /*
 117         * Check for supported code set, association, and designator type
 118         */
 119        if ((desc[4] & 0x0f) != 0x1) {
 120                pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
 121                return -EINVAL;
 122        }
 123        if ((desc[5] & 0x30) != 0x00) {
 124                pr_err("XCOPY 0xe4: association other than LUN not supported\n");
 125                return -EINVAL;
 126        }
 127        if ((desc[5] & 0x0f) != 0x3) {
 128                pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
 129                                (desc[5] & 0x0f));
 130                return -EINVAL;
 131        }
 132        /*
  133         * Check for the matching 16 byte length of an NAA IEEE Registered
  134         * Extended designator
 135         */
 136        desig_len = desc[7];
 137        if (desig_len != 16) {
 138                pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
 139                return -EINVAL;
 140        }
 141        pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
 142        /*
  143         * Check for the NAA IEEE Registered Extended designator header.
 144         */
 145        if ((desc[8] & 0xf0) != 0x60) {
 146                pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
 147                                        (desc[8] & 0xf0));
 148                return -EINVAL;
 149        }
 150
 151        if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
 152                pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
 153                         "dest\n", cscd_index);
 154                return 0;
 155        }
 156
 157        if (cscd_index == xop->stdi) {
 158                memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
 159                /*
 160                 * Determine if the source designator matches the local device
 161                 */
 162                if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
 163                                XCOPY_NAA_IEEE_REGEX_LEN)) {
 164                        xop->op_origin = XCOL_SOURCE_RECV_OP;
 165                        xop->src_dev = se_cmd->se_dev;
 166                        pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
 167                                        " received xop\n", xop->src_dev);
 168                }
 169        }
 170
 171        if (cscd_index == xop->dtdi) {
 172                memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
 173                /*
 174                 * Determine if the destination designator matches the local
 175                 * device. If @cscd_index corresponds to both source (stdi) and
 176                 * destination (dtdi), or dtdi comes after stdi, then
 177                 * XCOL_DEST_RECV_OP wins.
 178                 */
 179                if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
 180                                XCOPY_NAA_IEEE_REGEX_LEN)) {
 181                        xop->op_origin = XCOL_DEST_RECV_OP;
 182                        xop->dst_dev = se_cmd->se_dev;
 183                        pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
 184                                " received xop\n", xop->dst_dev);
 185                }
 186        }
 187
 188        return 0;
 189}
 190
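     /*
      * Parse the CSCD (target) descriptor list. Returns the number of
      * descriptors processed, or a negative value with *sense_ret set on
      * failure. The remote device located here is pinned via configfs and
      * must be released with xcopy_pt_undepend_remotedev().
      */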
 191static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 192                                struct xcopy_op *xop, unsigned char *p,
 193                                unsigned short tdll, sense_reason_t *sense_ret)
 194{
 195        struct se_device *local_dev = se_cmd->se_dev;
 196        unsigned char *desc = p;
 197        int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
 198        unsigned short cscd_index = 0;
 199        unsigned short start = 0;
 200
 201        *sense_ret = TCM_INVALID_PARAMETER_LIST;
 202
 203        if (offset != 0) {
  204                pr_err("XCOPY target descriptor list length is not a"
  205                        " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
 206                *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
 207                return -EINVAL;
 208        }
 209        if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
  210                pr_err("XCOPY target descriptor list supports a maximum of"
  211                        " two src/dest descriptors, tdll: %hu too large\n", tdll);
 212                /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
 213                *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
 214                return -EINVAL;
 215        }
 216        /*
 217         * Generate an IEEE Registered Extended designator based upon the
 218         * se_device the XCOPY was received upon..
 219         */
 220        memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
 221        target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
 222
 223        while (start < tdll) {
 224                /*
 225                 * Check target descriptor identification with 0xE4 type, and
 226                 * compare the current index with the CSCD descriptor IDs in
 227                 * the segment descriptor. Use VPD 0x83 WWPN matching ..
 228                 */
 229                switch (desc[0]) {
 230                case 0xe4:
 231                        rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
 232                                                        &desc[0], cscd_index);
 233                        if (rc != 0)
 234                                goto out;
 235                        start += XCOPY_TARGET_DESC_LEN;
 236                        desc += XCOPY_TARGET_DESC_LEN;
 237                        cscd_index++;
 238                        break;
 239                default:
 240                        pr_err("XCOPY unsupported descriptor type code:"
 241                                        " 0x%02x\n", desc[0]);
 242                        *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
 243                        goto out;
 244                }
 245        }
 246
 247        switch (xop->op_origin) {
 248        case XCOL_SOURCE_RECV_OP:
 249                rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
 250                                                &xop->dst_dev);
 251                break;
 252        case XCOL_DEST_RECV_OP:
 253                rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
 254                                                &xop->src_dev);
 255                break;
 256        default:
 257                pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
 258                        "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
 259                rc = -EINVAL;
 260                break;
 261        }
 262        /*
 263         * If a matching IEEE NAA 0x83 descriptor for the requested device
 264         * is not located on this node, return COPY_ABORTED with ASQ/ASQC
 265         * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
 266         * initiator to fall back to normal copy method.
 267         */
 268        if (rc < 0) {
 269                *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
 270                goto out;
 271        }
 272
 273        pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
 274                 xop->src_dev, &xop->src_tid_wwn[0]);
 275        pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
 276                 xop->dst_dev, &xop->dst_tid_wwn[0]);
 277
 278        return cscd_index;
 279
 280out:
 281        return -EINVAL;
 282}
 283
 284static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
 285                                        unsigned char *p)
 286{
 287        unsigned char *desc = p;
 288        int dc = (desc[1] & 0x02);
 289        unsigned short desc_len;
 290
 291        desc_len = get_unaligned_be16(&desc[2]);
 292        if (desc_len != 0x18) {
 293                pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
 294                                " %hu\n", desc_len);
 295                return -EINVAL;
 296        }
 297
 298        xop->stdi = get_unaligned_be16(&desc[4]);
 299        xop->dtdi = get_unaligned_be16(&desc[6]);
 300
 301        if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
 302            xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
 303                pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
 304                        XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
 305                return -EINVAL;
 306        }
 307
 308        pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
 309                desc_len, xop->stdi, xop->dtdi, dc);
 310
 311        xop->nolb = get_unaligned_be16(&desc[10]);
 312        xop->src_lba = get_unaligned_be64(&desc[12]);
 313        xop->dst_lba = get_unaligned_be64(&desc[20]);
 314        pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
 315                xop->nolb, (unsigned long long)xop->src_lba,
 316                (unsigned long long)xop->dst_lba);
 317
 318        if (dc != 0) {
 319                xop->dbl = get_unaligned_be24(&desc[29]);
 320
 321                pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
 322        }
 323        return 0;
 324}
 325
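     /*
      * Parse the segment descriptor list. Only the block -> block (0x02)
      * segment type is supported. Returns the number of descriptors parsed,
      * or -EINVAL with *sense_ret set on failure.
      */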
 326static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
 327                                struct xcopy_op *xop, unsigned char *p,
 328                                unsigned int sdll, sense_reason_t *sense_ret)
 329{
 330        unsigned char *desc = p;
 331        unsigned int start = 0;
 332        int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
 333
 334        *sense_ret = TCM_INVALID_PARAMETER_LIST;
 335
 336        if (offset != 0) {
  337                pr_err("XCOPY segment descriptor list length is not a"
  338                        " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
 339                *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
 340                return -EINVAL;
 341        }
 342        if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
 343                pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
 344                        " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
 345                /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
 346                *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
 347                return -EINVAL;
 348        }
 349
 350        while (start < sdll) {
 351                /*
 352                 * Check segment descriptor type code for block -> block
 353                 */
 354                switch (desc[0]) {
 355                case 0x02:
 356                        rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
 357                        if (rc < 0)
 358                                goto out;
 359
 360                        ret++;
 361                        start += XCOPY_SEGMENT_DESC_LEN;
 362                        desc += XCOPY_SEGMENT_DESC_LEN;
 363                        break;
 364                default:
  365                        pr_err("XCOPY unsupported segment descriptor"
  366                                " type: 0x%02x\n", desc[0]);
 367                        *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
 368                        goto out;
 369                }
 370        }
 371
 372        return ret;
 373
 374out:
 375        return -EINVAL;
 376}
 377
 378/*
 379 * Start xcopy_pt ops
 380 */
 381
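     /*
      * xcopy_pt is a minimal internal "fabric" used to submit the READ/WRITE
      * commands that implement the copy against the local backends. Most of
      * its callbacks are no-ops; command completion is signalled through
      * xpt_passthrough_sem in xcopy_pt_check_stop_free().
      */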
 382struct xcopy_pt_cmd {
 383        struct se_cmd se_cmd;
 384        struct completion xpt_passthrough_sem;
 385        unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 386};
 387
 388struct se_portal_group xcopy_pt_tpg;
 389static struct se_session xcopy_pt_sess;
 390static struct se_node_acl xcopy_pt_nacl;
 391
 392static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 393{
 394        return 0;
 395}
 396
 397static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 398{
 399        struct se_device *remote_dev;
 400
 401        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
 402                remote_dev = xop->dst_dev;
 403        else
 404                remote_dev = xop->src_dev;
 405
 406        pr_debug("Calling configfs_undepend_item for"
 407                  " remote_dev: %p remote_dev->dev_group: %p\n",
 408                  remote_dev, &remote_dev->dev_group.cg_item);
 409
 410        target_undepend_item(&remote_dev->dev_group.cg_item);
 411}
 412
 413static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
 414{
 415        struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
 416                                struct xcopy_pt_cmd, se_cmd);
 417
 418        kfree(xpt_cmd);
 419}
 420
 421static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
 422{
 423        struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
 424                                struct xcopy_pt_cmd, se_cmd);
 425
 426        complete(&xpt_cmd->xpt_passthrough_sem);
 427        return 0;
 428}
 429
 430static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
 431{
 432        return 0;
 433}
 434
 435static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
 436{
 437        return 0;
 438}
 439
 440static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
 441{
 442        return 0;
 443}
 444
 445static const struct target_core_fabric_ops xcopy_pt_tfo = {
 446        .fabric_name            = "xcopy-pt",
 447        .get_cmd_state          = xcopy_pt_get_cmd_state,
 448        .release_cmd            = xcopy_pt_release_cmd,
 449        .check_stop_free        = xcopy_pt_check_stop_free,
 450        .write_pending          = xcopy_pt_write_pending,
 451        .queue_data_in          = xcopy_pt_queue_data_in,
 452        .queue_status           = xcopy_pt_queue_status,
 453};
 454
 455/*
 456 * End xcopy_pt_ops
 457 */
 458
 459int target_xcopy_setup_pt(void)
 460{
 461        int ret;
 462
 463        xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
 464        if (!xcopy_wq) {
 465                pr_err("Unable to allocate xcopy_wq\n");
 466                return -ENOMEM;
 467        }
 468
 469        memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
 470        INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
 471        INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
 472
 473        xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
 474
 475        memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
 476        INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
 477        INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
 478        memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
 479        ret = transport_init_session(&xcopy_pt_sess);
 480        if (ret < 0)
 481                return ret;
 482
 483        xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
 484        xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
 485
 486        xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
 487        xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
 488
 489        return 0;
 490}
 491
 492void target_xcopy_release_pt(void)
 493{
 494        if (xcopy_wq)
 495                destroy_workqueue(xcopy_wq);
 496}
 497
 498/*
 499 * target_xcopy_setup_pt_cmd - set up a pass-through command
 500 * @xpt_cmd:     Data structure to initialize.
 501 * @xop:         Describes the XCOPY operation received from an initiator.
 502 * @se_dev:      Backend device to associate with @xpt_cmd if
 503 *               @remote_port == true.
 504 * @cdb:         SCSI CDB to be copied into @xpt_cmd.
 505 * @remote_port: If false, use the LUN through which the XCOPY command has
 506 *               been received. If true, use @se_dev->xcopy_lun.
  507 * @alloc_mem:   Whether or not to allocate a scatter-gather list (SGL).
 508 *
 509 * Set up a SCSI command (READ or WRITE) that will be used to execute an
 510 * XCOPY command.
 511 */
 512static int target_xcopy_setup_pt_cmd(
 513        struct xcopy_pt_cmd *xpt_cmd,
 514        struct xcopy_op *xop,
 515        struct se_device *se_dev,
 516        unsigned char *cdb,
 517        bool remote_port,
 518        bool alloc_mem)
 519{
 520        struct se_cmd *cmd = &xpt_cmd->se_cmd;
 521        sense_reason_t sense_rc;
 522        int ret = 0, rc;
 523
 524        /*
 525         * Setup LUN+port to honor reservations based upon xop->op_origin for
 526         * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
 527         */
 528        if (remote_port) {
 529                cmd->se_lun = &se_dev->xcopy_lun;
 530                cmd->se_dev = se_dev;
 531        } else {
 532                cmd->se_lun = xop->xop_se_cmd->se_lun;
 533                cmd->se_dev = xop->xop_se_cmd->se_dev;
 534        }
 535        cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 536
 537        cmd->tag = 0;
 538        sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
 539        if (sense_rc) {
 540                ret = -EINVAL;
 541                goto out;
 542        }
 543
 544        if (alloc_mem) {
 545                rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 546                                      cmd->data_length, false, false);
 547                if (rc < 0) {
 548                        ret = rc;
 549                        goto out;
 550                }
 551                /*
 552                 * Set this bit so that transport_free_pages() allows the
 553                 * caller to release SGLs + physical memory allocated by
 554                 * transport_generic_get_mem()..
 555                 */
 556                cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 557        } else {
 558                /*
 559                 * Here the previously allocated SGLs for the internal READ
 560                 * are mapped zero-copy to the internal WRITE.
 561                 */
 562                sense_rc = transport_generic_map_mem_to_cmd(cmd,
 563                                        xop->xop_data_sg, xop->xop_data_nents,
 564                                        NULL, 0);
 565                if (sense_rc) {
 566                        ret = -EINVAL;
 567                        goto out;
 568                }
 569
 570                pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
 571                         " %u\n", cmd->t_data_sg, cmd->t_data_nents);
 572        }
 573
 574        return 0;
 575
 576out:
 577        return ret;
 578}
 579
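     /*
      * Submit a previously set up pass-through command and wait for it to
      * complete. DMA_TO_DEVICE commands are kicked directly via
      * target_execute_cmd() since xcopy_pt has no write-pending data phase.
      * Returns -EINVAL for any non-GOOD SCSI status.
      */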
 580static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
 581{
 582        struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
 583        sense_reason_t sense_rc;
 584
 585        sense_rc = transport_generic_new_cmd(se_cmd);
 586        if (sense_rc)
 587                return -EINVAL;
 588
 589        if (se_cmd->data_direction == DMA_TO_DEVICE)
 590                target_execute_cmd(se_cmd);
 591
 592        wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
 593
 594        pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
 595                        se_cmd->scsi_status);
 596
 597        return (se_cmd->scsi_status) ? -EINVAL : 0;
 598}
 599
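     /*
      * Issue an internal READ_16 of @src_sectors starting at @src_lba on
      * @src_dev. The SGL allocated for the READ is saved in @xop so the
      * subsequent WRITE to the destination can reuse it zero-copy.
      */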
 600static int target_xcopy_read_source(
 601        struct se_cmd *ec_cmd,
 602        struct xcopy_op *xop,
 603        struct se_device *src_dev,
 604        sector_t src_lba,
 605        u32 src_sectors)
 606{
 607        struct xcopy_pt_cmd *xpt_cmd;
 608        struct se_cmd *se_cmd;
 609        u32 length = (src_sectors * src_dev->dev_attrib.block_size);
 610        int rc;
 611        unsigned char cdb[16];
 612        bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
 613
 614        xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
 615        if (!xpt_cmd) {
 616                pr_err("Unable to allocate xcopy_pt_cmd\n");
 617                return -ENOMEM;
 618        }
 619        init_completion(&xpt_cmd->xpt_passthrough_sem);
 620        se_cmd = &xpt_cmd->se_cmd;
 621
 622        memset(&cdb[0], 0, 16);
 623        cdb[0] = READ_16;
 624        put_unaligned_be64(src_lba, &cdb[2]);
 625        put_unaligned_be32(src_sectors, &cdb[10]);
 626        pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
 627                (unsigned long long)src_lba, src_sectors, length);
 628
 629        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
 630                              DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 631        xop->src_pt_cmd = xpt_cmd;
 632
 633        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
 634                                remote_port, true);
 635        if (rc < 0) {
 636                ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
 637                transport_generic_free_cmd(se_cmd, 0);
 638                return rc;
 639        }
 640
 641        xop->xop_data_sg = se_cmd->t_data_sg;
 642        xop->xop_data_nents = se_cmd->t_data_nents;
 643        pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
 644                " memory\n", xop->xop_data_sg, xop->xop_data_nents);
 645
 646        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
 647        if (rc < 0) {
 648                ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
 649                transport_generic_free_cmd(se_cmd, 0);
 650                return rc;
 651        }
 652        /*
 653         * Clear off the allocated t_data_sg, that has been saved for
 654         * zero-copy WRITE submission reuse in struct xcopy_op..
 655         */
 656        se_cmd->t_data_sg = NULL;
 657        se_cmd->t_data_nents = 0;
 658
 659        return 0;
 660}
 661
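     /*
      * Issue an internal WRITE_16 of @dst_sectors starting at @dst_lba on
      * @dst_dev, mapping the SGL saved by target_xcopy_read_source() into the
      * WRITE command instead of allocating new memory.
      */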
 662static int target_xcopy_write_destination(
 663        struct se_cmd *ec_cmd,
 664        struct xcopy_op *xop,
 665        struct se_device *dst_dev,
 666        sector_t dst_lba,
 667        u32 dst_sectors)
 668{
 669        struct xcopy_pt_cmd *xpt_cmd;
 670        struct se_cmd *se_cmd;
 671        u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
 672        int rc;
 673        unsigned char cdb[16];
 674        bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
 675
 676        xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
 677        if (!xpt_cmd) {
 678                pr_err("Unable to allocate xcopy_pt_cmd\n");
 679                return -ENOMEM;
 680        }
 681        init_completion(&xpt_cmd->xpt_passthrough_sem);
 682        se_cmd = &xpt_cmd->se_cmd;
 683
 684        memset(&cdb[0], 0, 16);
 685        cdb[0] = WRITE_16;
 686        put_unaligned_be64(dst_lba, &cdb[2]);
 687        put_unaligned_be32(dst_sectors, &cdb[10]);
 688        pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
 689                (unsigned long long)dst_lba, dst_sectors, length);
 690
 691        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
 692                              DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 693        xop->dst_pt_cmd = xpt_cmd;
 694
 695        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
 696                                remote_port, false);
 697        if (rc < 0) {
 698                struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
 699                ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
 700                /*
 701                 * If the failure happened before the t_mem_list hand-off in
 702                 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
 703                 * core releases this memory on error during X-COPY WRITE I/O.
 704                 */
 705                src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 706                src_cmd->t_data_sg = xop->xop_data_sg;
 707                src_cmd->t_data_nents = xop->xop_data_nents;
 708
 709                transport_generic_free_cmd(se_cmd, 0);
 710                return rc;
 711        }
 712
 713        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
 714        if (rc < 0) {
 715                ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
 716                se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 717                transport_generic_free_cmd(se_cmd, 0);
 718                return rc;
 719        }
 720
 721        return 0;
 722}
 723
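     /*
      * Workqueue handler for EXTENDED COPY: parse the received parameter
      * list, then copy up to max_sectors blocks per iteration (READ from the
      * source, WRITE to the destination) until all nolb blocks have been
      * transferred, and finally complete the originating command.
      */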
 724static void target_xcopy_do_work(struct work_struct *work)
 725{
 726        struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
 727        struct se_cmd *ec_cmd = xop->xop_se_cmd;
 728        struct se_device *src_dev, *dst_dev;
 729        sector_t src_lba, dst_lba, end_lba;
 730        unsigned int max_sectors;
 731        int rc = 0;
 732        unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
 733
 734        if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
 735                goto err_free;
 736
 737        if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
 738                goto err_free;
 739
 740        src_dev = xop->src_dev;
 741        dst_dev = xop->dst_dev;
 742        src_lba = xop->src_lba;
 743        dst_lba = xop->dst_lba;
 744        nolb = xop->nolb;
 745        end_lba = src_lba + nolb;
 746        /*
 747         * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
 748         * smallest max_sectors between src_dev + dev_dev, or
 749         */
 750        max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
 751                          dst_dev->dev_attrib.hw_max_sectors);
 752        max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
 753
 754        max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
 755
 756        pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
 757                        nolb, max_nolb, (unsigned long long)end_lba);
 758        pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
 759                        (unsigned long long)src_lba, (unsigned long long)dst_lba);
 760
 761        while (src_lba < end_lba) {
 762                cur_nolb = min(nolb, max_nolb);
 763
 764                pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
 765                        " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
 766
 767                rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
 768                if (rc < 0)
 769                        goto out;
 770
 771                src_lba += cur_nolb;
 772                pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
 773                                (unsigned long long)src_lba);
 774
 775                pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
 776                        " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
 777
 778                rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
 779                                                dst_lba, cur_nolb);
 780                if (rc < 0) {
 781                        transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
 782                        goto out;
 783                }
 784
 785                dst_lba += cur_nolb;
 786                pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
 787                                (unsigned long long)dst_lba);
 788
 789                copied_nolb += cur_nolb;
 790                nolb -= cur_nolb;
 791
 792                transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
 793                xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 794
 795                transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
 796        }
 797
 798        xcopy_pt_undepend_remotedev(xop);
 799        kfree(xop);
 800
 801        pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
 802                (unsigned long long)src_lba, (unsigned long long)dst_lba);
 803        pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
 804                copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
 805
 806        pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
 807        target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
 808        return;
 809
 810out:
 811        xcopy_pt_undepend_remotedev(xop);
 812
 813err_free:
 814        kfree(xop);
 815        /*
 816         * Don't override an error scsi status if it has already been set
 817         */
 818        if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
 819                pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
 820                        " CHECK_CONDITION -> sending response\n", rc);
 821                ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
 822        }
 823        target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
 824}
 825
 826/*
 827 * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
 828 * fails.
 829 */
 830static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
 831{
 832        struct se_cmd *se_cmd = xop->xop_se_cmd;
 833        unsigned char *p = NULL, *seg_desc;
 834        unsigned int list_id, list_id_usage, sdll, inline_dl;
 835        sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
 836        int rc;
 837        unsigned short tdll;
 838
 839        p = transport_kmap_data_sg(se_cmd);
 840        if (!p) {
  841                pr_err("transport_kmap_data_sg() failed in target_parse_xcopy_cmd\n");
 842                return TCM_OUT_OF_RESOURCES;
 843        }
 844
 845        list_id = p[0];
 846        list_id_usage = (p[1] & 0x18) >> 3;
 847
 848        /*
 849         * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
 850         */
 851        tdll = get_unaligned_be16(&p[2]);
 852        sdll = get_unaligned_be32(&p[8]);
 853        if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
 854                pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
 855                       tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
 856                ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
 857                goto out;
 858        }
 859
 860        inline_dl = get_unaligned_be32(&p[12]);
 861        if (inline_dl != 0) {
 862                pr_err("XCOPY with non zero inline data length\n");
 863                goto out;
 864        }
 865
 866        if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
 867                pr_err("XCOPY parameter truncation: data length %u too small "
 868                        "for tdll: %hu sdll: %u inline_dl: %u\n",
 869                        se_cmd->data_length, tdll, sdll, inline_dl);
 870                ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
 871                goto out;
 872        }
 873
 874        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
 875                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
 876                tdll, sdll, inline_dl);
 877
 878        /*
 879         * skip over the target descriptors until segment descriptors
 880         * have been passed - CSCD ids are needed to determine src and dest.
 881         */
 882        seg_desc = &p[16] + tdll;
 883
 884        rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
 885                                                    sdll, &ret);
 886        if (rc <= 0)
 887                goto out;
 888
 889        pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
 890                                rc * XCOPY_SEGMENT_DESC_LEN);
 891
 892        rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
 893        if (rc <= 0)
 894                goto out;
 895
 896        if (xop->src_dev->dev_attrib.block_size !=
 897            xop->dst_dev->dev_attrib.block_size) {
  898                pr_err("XCOPY: Non-matching src_dev block_size: %u + dst_dev"
  899                       " block_size: %u currently unsupported\n",
 900                        xop->src_dev->dev_attrib.block_size,
 901                        xop->dst_dev->dev_attrib.block_size);
 902                xcopy_pt_undepend_remotedev(xop);
 903                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 904                goto out;
 905        }
 906
 907        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
 908                                rc * XCOPY_TARGET_DESC_LEN);
 909        transport_kunmap_data_sg(se_cmd);
 910        return TCM_NO_SENSE;
 911
 912out:
 913        if (p)
 914                transport_kunmap_data_sg(se_cmd);
 915        return ret;
 916}
 917
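     /*
      * Entry point for EXTENDED COPY (LID1). Validate that 3PC emulation is
      * enabled and that the parameter list is at least header-sized, then
      * defer the actual copy work to xcopy_wq.
      */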
 918sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 919{
 920        struct se_device *dev = se_cmd->se_dev;
 921        struct xcopy_op *xop;
 922        unsigned int sa;
 923
 924        if (!dev->dev_attrib.emulate_3pc) {
 925                pr_err("EXTENDED_COPY operation explicitly disabled\n");
 926                return TCM_UNSUPPORTED_SCSI_OPCODE;
 927        }
 928
 929        sa = se_cmd->t_task_cdb[1] & 0x1f;
 930        if (sa != 0x00) {
 931                pr_err("EXTENDED_COPY(LID4) not supported\n");
 932                return TCM_UNSUPPORTED_SCSI_OPCODE;
 933        }
 934
 935        if (se_cmd->data_length == 0) {
 936                target_complete_cmd(se_cmd, SAM_STAT_GOOD);
 937                return TCM_NO_SENSE;
 938        }
 939        if (se_cmd->data_length < XCOPY_HDR_LEN) {
 940                pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
 941                                se_cmd->data_length, XCOPY_HDR_LEN);
 942                return TCM_PARAMETER_LIST_LENGTH_ERROR;
 943        }
 944
 945        xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
 946        if (!xop)
 947                goto err;
 948        xop->xop_se_cmd = se_cmd;
 949        INIT_WORK(&xop->xop_work, target_xcopy_do_work);
 950        if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
 951                goto free;
 952        return TCM_NO_SENSE;
 953
 954free:
 955        kfree(xop);
 956
 957err:
 958        return TCM_OUT_OF_RESOURCES;
 959}
 960
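     /*
      * Fill in the RECEIVE COPY RESULTS / OPERATING PARAMETERS payload,
      * advertising the descriptor-count and length limits used by the
      * EXTENDED COPY parser above (RCR_OP_* constants).
      */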
 961static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
 962{
 963        unsigned char *p;
 964
 965        p = transport_kmap_data_sg(se_cmd);
 966        if (!p) {
 967                pr_err("transport_kmap_data_sg failed in"
 968                       " target_rcr_operating_parameters\n");
 969                return TCM_OUT_OF_RESOURCES;
 970        }
 971
 972        if (se_cmd->data_length < 54) {
 973                pr_err("Receive Copy Results Op Parameters length"
 974                       " too small: %u\n", se_cmd->data_length);
 975                transport_kunmap_data_sg(se_cmd);
 976                return TCM_INVALID_CDB_FIELD;
 977        }
 978        /*
 979         * Set SNLID=1 (Supports no List ID)
 980         */
 981        p[4] = 0x1;
 982        /*
 983         * MAXIMUM TARGET DESCRIPTOR COUNT
 984         */
 985        put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
 986        /*
 987         * MAXIMUM SEGMENT DESCRIPTOR COUNT
 988         */
 989        put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
 990        /*
 991         * MAXIMUM DESCRIPTOR LIST LENGTH
 992         */
 993        put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
 994        /*
 995         * MAXIMUM SEGMENT LENGTH
 996         */
 997        put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
 998        /*
 999         * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
1000         */
1001        put_unaligned_be32(0x0, &p[20]);
1002        /*
1003         * HELD DATA LIMIT
1004         */
1005        put_unaligned_be32(0x0, &p[24]);
1006        /*
1007         * MAXIMUM STREAM DEVICE TRANSFER SIZE
1008         */
1009        put_unaligned_be32(0x0, &p[28]);
1010        /*
1011         * TOTAL CONCURRENT COPIES
1012         */
1013        put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
1014        /*
1015         * MAXIMUM CONCURRENT COPIES
1016         */
1017        p[36] = RCR_OP_MAX_CONCURR_COPIES;
1018        /*
1019         * DATA SEGMENT GRANULARITY (log 2)
1020         */
1021        p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
1022        /*
 1023         * INLINE DATA GRANULARITY (log 2)
1024         */
1025        p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
1026        /*
1027         * HELD DATA GRANULARITY
1028         */
1029        p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
1030        /*
1031         * IMPLEMENTED DESCRIPTOR LIST LENGTH
1032         */
1033        p[43] = 0x2;
1034        /*
1035         * List of implemented descriptor type codes (ordered)
1036         */
1037        p[44] = 0x02; /* Copy Block to Block device */
1038        p[45] = 0xe4; /* Identification descriptor target descriptor */
1039
1040        /*
1041         * AVAILABLE DATA (n-3)
1042         */
1043        put_unaligned_be32(42, &p[0]);
1044
1045        transport_kunmap_data_sg(se_cmd);
 1046        target_complete_cmd(se_cmd, SAM_STAT_GOOD);
1047
1048        return TCM_NO_SENSE;
1049}
1050
1051sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
1052{
1053        unsigned char *cdb = &se_cmd->t_task_cdb[0];
1054        int sa = (cdb[1] & 0x1f), list_id = cdb[2];
1055        sense_reason_t rc = TCM_NO_SENSE;
1056
1057        pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
1058                " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
1059
1060        if (list_id != 0) {
1061                pr_err("Receive Copy Results with non zero list identifier"
1062                       " not supported\n");
1063                return TCM_INVALID_CDB_FIELD;
1064        }
1065
1066        switch (sa) {
1067        case RCR_SA_OPERATING_PARAMETERS:
1068                rc = target_rcr_operating_parameters(se_cmd);
1069                break;
1070        case RCR_SA_COPY_STATUS:
1071        case RCR_SA_RECEIVE_DATA:
1072        case RCR_SA_FAILED_SEGMENT_DETAILS:
1073        default:
1074                pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
1075                return TCM_INVALID_CDB_FIELD;
1076        }
1077
1078        return rc;
1079}
1080