linux/drivers/target/tcm_fc/tfc_cmd.c
/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Dump cmd state for debugging.
 */
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
        struct fc_exch *ep;
        struct fc_seq *sp;
        struct se_cmd *se_cmd;
        struct scatterlist *sg;
        int count;

        se_cmd = &cmd->se_cmd;
        pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
                caller, cmd, cmd->sess, cmd->seq, se_cmd);

        pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
                caller, cmd, se_cmd->t_data_nents,
                se_cmd->data_length, se_cmd->se_cmd_flags);

        for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
                pr_debug("%s: cmd %p sg %p page %p "
                        "len 0x%x off 0x%x\n",
                        caller, cmd, sg,
                        sg_page(sg), sg->length, sg->offset);

        sp = cmd->seq;
        if (sp) {
                ep = fc_seq_exch(sp);
                pr_debug("%s: cmd %p sid %x did %x "
                        "ox_id %x rx_id %x seq_id %x e_stat %x\n",
                        caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
                        sp->id, ep->esb_stat);
        }
}

void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
        if (unlikely(ft_debug_logging))
                _ft_dump_cmd(cmd, caller);
}

static void ft_free_cmd(struct ft_cmd *cmd)
{
        struct fc_frame *fp;
        struct fc_lport *lport;
        struct se_session *se_sess;

        if (!cmd)
                return;
        se_sess = cmd->sess->se_sess;
        fp = cmd->req_frame;
        lport = fr_dev(fp);
        if (fr_seq(fp))
                lport->tt.seq_release(fr_seq(fp));
        fc_frame_free(fp);
        percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
        ft_sess_put(cmd->sess); /* undo get from lookup at recv */
}

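/*
 * Fabric callback to release a command: called by the target core once it
 * is done with the se_cmd, so the tcm_fc per-command state can be freed.
 */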
void ft_release_cmd(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        ft_free_cmd(cmd);
}

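/*
 * Fabric check_stop_free callback: drop this command's reference so it is
 * released through ft_release_cmd().  Returning 1 tells the target core
 * that the fabric handles freeing the command.
 */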
int ft_check_stop_free(struct se_cmd *se_cmd)
{
        transport_generic_free_cmd(se_cmd, 0);
        return 1;
}

/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct fc_frame *fp;
        struct fcp_resp_with_ext *fcp;
        struct fc_lport *lport;
        struct fc_exch *ep;
        size_t len;

        if (cmd->aborted)
                return 0;
        ft_dump_cmd(cmd, __func__);
        ep = fc_seq_exch(cmd->seq);
        lport = ep->lp;
        len = sizeof(*fcp) + se_cmd->scsi_sense_length;
        fp = fc_frame_alloc(lport, len);
        if (!fp) {
                /* XXX shouldn't just drop it - requeue and retry? */
                return 0;
        }
        fcp = fc_frame_payload_get(fp, len);
        memset(fcp, 0, len);
        fcp->resp.fr_status = se_cmd->scsi_status;

        len = se_cmd->scsi_sense_length;
        if (len) {
                fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
                fcp->ext.fr_sns_len = htonl(len);
                memcpy((fcp + 1), se_cmd->sense_buffer, len);
        }

        /*
         * Test underflow and overflow with one mask.  Usually both are off.
         * Bidirectional commands are not handled yet.
         */
        if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
                if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
                        fcp->resp.fr_flags |= FCP_RESID_OVER;
                else
                        fcp->resp.fr_flags |= FCP_RESID_UNDER;
                fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
        }

        /*
         * Send response.
         */
        cmd->seq = lport->tt.seq_start_next(cmd->seq);
        fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
                       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

        lport->tt.seq_send(lport, cmd->seq, fp);
        lport->tt.exch_done(cmd->seq);
        return 0;
}

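/*
 * Report whether write data is still pending for this command; returns
 * nonzero until all expected write data has been received.
 */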
int ft_write_pending_status(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        return cmd->write_data_len != se_cmd->data_length;
}

/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct fc_frame *fp;
        struct fcp_txrdy *txrdy;
        struct fc_lport *lport;
        struct fc_exch *ep;
        struct fc_frame_header *fh;
        u32 f_ctl;

        ft_dump_cmd(cmd, __func__);

        if (cmd->aborted)
                return 0;
        ep = fc_seq_exch(cmd->seq);
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
                return -ENOMEM; /* Signal QUEUE_FULL */

        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
        txrdy->ft_burst_len = htonl(se_cmd->data_length);

        cmd->seq = lport->tt.seq_start_next(cmd->seq);
        fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
                       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        fh = fc_frame_header_get(fp);
        f_ctl = ntoh24(fh->fh_f_ctl);

        /* Only if it is the 'Exchange Responder' */
        if (f_ctl & FC_FC_EX_CTX) {
                /*
                 * The target is the exchange responder and is sending
                 * XFER_READY to the exchange initiator.
                 */
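                /*
                 * Set up direct data placement (DDP) for this exchange so
                 * inbound write data can be placed directly into the
                 * command's scatterlist; exchanges above lro_xid cannot be
                 * offloaded.
                 */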
                if ((ep->xid <= lport->lro_xid) &&
                    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
                        if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
                            lport->tt.ddp_target(lport, ep->xid,
                                                 se_cmd->t_data_sg,
                                                 se_cmd->t_data_nents))
                                cmd->was_ddp_setup = 1;
                }
        }
        lport->tt.seq_send(lport, cmd->seq, fp);
        return 0;
}

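/*
 * Return the exchange RX_ID as the protocol task tag, or ~0 if the
 * exchange has already been aborted.
 */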
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

        if (cmd->aborted)
                return ~0;
        return fc_seq_exch(cmd->seq)->rxid;
}

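/*
 * Fabric get_cmd_state callback; tcm_fc keeps no extra per-command state,
 * so always report 0.
 */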
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
        struct ft_cmd *cmd = arg;
        struct fc_frame_header *fh;

        if (unlikely(IS_ERR(fp))) {
                /* XXX need to find cmd if queued */
                cmd->seq = NULL;
                cmd->aborted = true;
                return;
        }

        fh = fc_frame_header_get(fp);

        switch (fh->fh_r_ctl) {
        case FC_RCTL_DD_SOL_DATA:       /* write data */
                ft_recv_write_data(cmd, fp);
                break;
        case FC_RCTL_DD_UNSOL_CTL:      /* command */
        case FC_RCTL_DD_SOL_CTL:        /* transfer ready */
        case FC_RCTL_DD_DATA_DESC:      /* transfer ready */
        default:
                pr_debug("%s: unhandled frame r_ctl %x\n",
                       __func__, fh->fh_r_ctl);
                ft_invl_hw_context(cmd);
                fc_frame_free(fp);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        }
}

/*
 * Send an FCP response including SCSI status and optional FCP rsp_code.
 * The rsp_code is valid only when status is SAM_STAT_GOOD (zero).
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
                                const struct fc_frame *rx_fp,
                                u32 status, enum fcp_resp_rsp_codes code)
{
        struct fc_frame *fp;
        struct fc_seq *sp;
        const struct fc_frame_header *fh;
        size_t len;
        struct fcp_resp_with_ext *fcp;
        struct fcp_resp_rsp_info *info;

        fh = fc_frame_header_get(rx_fp);
        pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
                  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
        len = sizeof(*fcp);
        if (status == SAM_STAT_GOOD)
                len += sizeof(*info);
        fp = fc_frame_alloc(lport, len);
        if (!fp)
                return;
        fcp = fc_frame_payload_get(fp, len);
        memset(fcp, 0, len);
        fcp->resp.fr_status = status;
        if (status == SAM_STAT_GOOD) {
                fcp->ext.fr_rsp_len = htonl(sizeof(*info));
                fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
                info = (struct fcp_resp_rsp_info *)(fcp + 1);
                info->rsp_code = code;
        }

        fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
        sp = fr_seq(fp);
        if (sp) {
                lport->tt.seq_send(lport, sp, fp);
                lport->tt.exch_done(sp);
        } else {
                lport->tt.frame_send(lport, fp);
        }
}

/*
 * Send error or task management response.
 */
static void ft_send_resp_code(struct ft_cmd *cmd,
                              enum fcp_resp_rsp_codes code)
{
        ft_send_resp_status(cmd->sess->tport->lport,
                            cmd->req_frame, SAM_STAT_GOOD, code);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
                                      enum fcp_resp_rsp_codes code)
{
        ft_send_resp_code(cmd, code);
        ft_free_cmd(cmd);
}

/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
        struct fcp_cmnd *fcp;
        int rc;
        u8 tm_func;

        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

        switch (fcp->fc_tm_flags) {
        case FCP_TMF_LUN_RESET:
                tm_func = TMR_LUN_RESET;
                break;
        case FCP_TMF_TGT_RESET:
                tm_func = TMR_TARGET_WARM_RESET;
                break;
        case FCP_TMF_CLR_TASK_SET:
                tm_func = TMR_CLEAR_TASK_SET;
                break;
        case FCP_TMF_ABT_TASK_SET:
                tm_func = TMR_ABORT_TASK_SET;
                break;
        case FCP_TMF_CLR_ACA:
                tm_func = TMR_CLEAR_ACA;
                break;
        default:
                /*
                 * FCP4r01 indicates having a combination of
                 * tm_flags set is invalid.
                 */
                pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
                ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
                return;
        }

        /* FIXME: Add referenced task tag for ABORT_TASK */
        rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
                &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
                cmd, tm_func, GFP_KERNEL, 0, 0);
        if (rc < 0)
                ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}

/*
 * Send status from completed task management request.
 */
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct se_tmr_req *tmr = se_cmd->se_tmr_req;
        enum fcp_resp_rsp_codes code;

        if (cmd->aborted)
                return;
        switch (tmr->response) {
        case TMR_FUNCTION_COMPLETE:
                code = FCP_TMF_CMPL;
                break;
        case TMR_LUN_DOES_NOT_EXIST:
                code = FCP_TMF_INVALID_LUN;
                break;
        case TMR_FUNCTION_REJECTED:
                code = FCP_TMF_REJECTED;
                break;
        case TMR_TASK_DOES_NOT_EXIST:
        case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
        default:
                code = FCP_TMF_FAILED;
                break;
        }
        pr_debug("tmr fn %d resp %d fcp code %d\n",
                  tmr->function, tmr->response, code);
        ft_send_resp_code(cmd, code);
}

static void ft_send_work(struct work_struct *work);

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
        struct ft_cmd *cmd;
        struct fc_lport *lport = sess->tport->lport;
        struct se_session *se_sess = sess->se_sess;
        int tag;

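        /*
         * Command structures are pre-allocated per session in sess_cmd_map;
         * take a free slot from the session's tag pool.  GFP_ATOMIC is used
         * because this runs in the frame receive path.
         */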
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
        if (tag < 0)
                goto busy;

        cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
        memset(cmd, 0, sizeof(struct ft_cmd));

        cmd->se_cmd.map_tag = tag;
        cmd->sess = sess;
        cmd->seq = lport->tt.seq_assign(lport, fp);
        if (!cmd->seq) {
                percpu_ida_free(&se_sess->sess_tag_pool, tag);
                goto busy;
        }
        cmd->req_frame = fp;            /* hold frame during cmd */

        INIT_WORK(&cmd->work, ft_send_work);
        queue_work(sess->tport->tpg->workqueue, &cmd->work);
        return;

busy:
        pr_debug("cmd or seq allocation failure - sending BUSY\n");
        ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
        fc_frame_free(fp);
        ft_sess_put(sess);              /* undo get from lookup */
}

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);

        switch (fh->fh_r_ctl) {
        case FC_RCTL_DD_UNSOL_CMD:      /* command */
                ft_recv_cmd(sess, fp);
                break;
        case FC_RCTL_DD_SOL_DATA:       /* write data */
        case FC_RCTL_DD_UNSOL_CTL:
        case FC_RCTL_DD_SOL_CTL:
        case FC_RCTL_DD_DATA_DESC:      /* transfer ready */
        case FC_RCTL_ELS4_REQ:          /* SRR, perhaps */
        default:
                pr_debug("%s: unhandled frame r_ctl %x\n",
                       __func__, fh->fh_r_ctl);
                fc_frame_free(fp);
                ft_sess_put(sess);      /* undo get from lookup */
                break;
        }
}

/*
 * Send new command to target.
 */
static void ft_send_work(struct work_struct *work)
{
        struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
        struct fcp_cmnd *fcp;
        int data_dir = 0;
        int task_attr;

        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
        if (!fcp)
                goto err;

        if (fcp->fc_flags & FCP_CFL_LEN_MASK)
                goto err;               /* not handling longer CDBs yet */

        /*
         * Check for FCP task management flags
         */
        if (fcp->fc_tm_flags) {
                ft_send_tm(cmd);
                return;
        }

        switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
        case 0:
                data_dir = DMA_NONE;
                break;
        case FCP_CFL_RDDATA:
                data_dir = DMA_FROM_DEVICE;
                break;
        case FCP_CFL_WRDATA:
                data_dir = DMA_TO_DEVICE;
                break;
        case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
                goto err;       /* TBD not supported by tcm_fc yet */
        }
        /*
         * Locate the SAM Task Attr from fc_pri_ta
         */
        switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
        case FCP_PTA_HEADQ:
                task_attr = MSG_HEAD_TAG;
                break;
        case FCP_PTA_ORDERED:
                task_attr = MSG_ORDERED_TAG;
                break;
        case FCP_PTA_ACA:
                task_attr = MSG_ACA_TAG;
                break;
        case FCP_PTA_SIMPLE: /* Fallthrough */
        default:
                task_attr = MSG_SIMPLE_TAG;
        }

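        /*
         * Register ft_recv_seq() to handle follow-on frames (write data)
         * and aborts for this exchange.
         */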
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
        /*
         * Use a single se_cmd->cmd_kref as we expect to release se_cmd
         * directly from ft_check_stop_free callback in response path.
         */
        if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
                              &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
                              ntohl(fcp->fc_dl), task_attr, data_dir, 0))
                goto err;

        pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
        return;

err:
        ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}