linux/drivers/scsi/elx/efct/efct_scsi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"

#define enable_tsend_auto_resp(efct)    1
#define enable_treceive_auto_resp(efct) 0

#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"

#define scsi_io_printf(io, fmt, ...) \
        efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
                io->node->display_name, io->instance_index, \
                io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

#define EFCT_LOG_ENABLE_SCSI_TRACE(efct)                \
                (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)

#define scsi_io_trace(io, fmt, ...) \
        do { \
                if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
                        scsi_io_printf(io, fmt, ##__VA_ARGS__); \
        } while (0)
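
/*
 * Illustrative sketch, not part of the driver: scsi_io_trace() is gated by
 * bit 2 of efct->logmask (see EFCT_LOG_ENABLE_SCSI_TRACE above), so per-IO
 * tracing can be toggled at runtime. The helper name below is hypothetical.
 */
#if 0
static void example_set_scsi_trace(struct efct *efct, bool enable)
{
        if (enable)
                efct->logmask |= (1U << 2);     /* scsi_io_trace() now emits */
        else
                efct->logmask &= ~(1U << 2);    /* tracing silenced */
}
#endif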

struct efct_io *
efct_scsi_io_alloc(struct efct_node *node)
{
        struct efct *efct;
        struct efct_xport *xport;
        struct efct_io *io;
        unsigned long flags;

        efct = node->efct;

        xport = efct->xport;

        io = efct_io_pool_io_alloc(efct->xport->io_pool);
        if (!io) {
                efc_log_err(efct, "IO alloc failed\n");
                atomic_add_return(1, &xport->io_alloc_failed_count);
                return NULL;
        }

        /* initialize refcount */
        kref_init(&io->ref);
        io->release = _efct_scsi_io_free;

        /* set generic fields */
        io->efct = efct;
        io->node = node;
        kref_get(&node->ref);

        /* set type and name */
        io->io_type = EFCT_IO_TYPE_IO;
        io->display_name = "scsi_io";

        io->cmd_ini = false;
        io->cmd_tgt = true;

        /* Add to node's active_ios list */
        INIT_LIST_HEAD(&io->list_entry);
        spin_lock_irqsave(&node->active_ios_lock, flags);
        list_add(&io->list_entry, &node->active_ios);
        spin_unlock_irqrestore(&node->active_ios_lock, flags);

        return io;
}
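
/*
 * Usage sketch, assuming a caller that already holds a valid efct_node (for
 * example, a new-command path in a target backend). The function below is
 * hypothetical; it only shows the allocate/fail pattern for
 * efct_scsi_io_alloc(), which takes a node reference and links the IO onto
 * node->active_ios.
 */
#if 0
static struct efct_io *example_start_cmd(struct efct_node *node)
{
        struct efct_io *io;

        io = efct_scsi_io_alloc(node);
        if (!io)
                return NULL;    /* pool exhausted; caller must back off */

        /* ... fill in command state, then dispatch data or a response ... */
        return io;
}
#endif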

void
_efct_scsi_io_free(struct kref *arg)
{
        struct efct_io *io = container_of(arg, struct efct_io, ref);
        struct efct *efct = io->efct;
        struct efct_node *node = io->node;
        unsigned long flags = 0;

        scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

        if (io->io_free) {
                efc_log_err(efct, "IO already freed.\n");
                return;
        }

        spin_lock_irqsave(&node->active_ios_lock, flags);
        list_del_init(&io->list_entry);
        spin_unlock_irqrestore(&node->active_ios_lock, flags);

        kref_put(&node->ref, node->release);
        io->node = NULL;
        efct_io_pool_io_free(efct->xport->io_pool, io);
}

void
efct_scsi_io_free(struct efct_io *io)
{
        scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
        WARN_ON(!refcount_read(&io->ref.refcount));
        kref_put(&io->ref, io->release);
}
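
/*
 * Reference-count sketch (illustrative): every efct_io starts with one
 * reference from kref_init() in efct_scsi_io_alloc(); _efct_scsi_io_free()
 * runs only when the last kref_put() drops the count to zero, unlinking the
 * IO and releasing the node reference taken at allocation time. The helper
 * name is hypothetical.
 */
#if 0
static void example_io_ref_cycle(struct efct_node *node)
{
        struct efct_io *io = efct_scsi_io_alloc(node);  /* refcount == 1 */

        if (!io)
                return;

        kref_get(&io->ref);                     /* share with another context */
        kref_put(&io->ref, io->release);        /* still alive, count == 1 */
        efct_scsi_io_free(io);          /* final put -> _efct_scsi_io_free() */
}
#endif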

static void
efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
                  u32 ext_status, void *app)
{
        u32 flags = 0;
        struct efct_io *io = app;
        struct efct *efct;
        enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
        efct_scsi_io_cb_t cb;

        if (!io || !io->efct) {
                pr_err("%s: IO cannot be NULL\n", __func__);
                return;
        }

        scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

        efct = io->efct;

        io->transferred += length;

        if (!io->scsi_tgt_cb) {
                efct_scsi_check_pending(efct);
                return;
        }

        /* Call target server completion */
        cb = io->scsi_tgt_cb;

        /* Clear the callback before invoking the callback */
        io->scsi_tgt_cb = NULL;

        /* If status was good and auto-good-response was set, call the
         * target server back with IO_CMPL_RSP_SENT; otherwise send IO_CMPL.
         */
        if (status == 0 && io->auto_resp)
                flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
        else
                flags |= EFCT_SCSI_IO_CMPL;

        switch (status) {
        case SLI4_FC_WCQE_STATUS_SUCCESS:
                scsi_stat = EFCT_SCSI_STATUS_GOOD;
                break;
        case SLI4_FC_WCQE_STATUS_DI_ERROR:
                if (ext_status & SLI4_FC_DI_ERROR_GE)
                        scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
                else if (ext_status & SLI4_FC_DI_ERROR_AE)
                        scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
                else if (ext_status & SLI4_FC_DI_ERROR_RE)
                        scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
                else
                        scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
                break;
        case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
                switch (ext_status) {
                case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
                case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
                        scsi_stat = EFCT_SCSI_STATUS_ABORTED;
                        break;
                case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
                        scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
                        break;
                case SLI4_FC_LOCAL_REJECT_NO_XRI:
                        scsi_stat = EFCT_SCSI_STATUS_NO_IO;
                        break;
                default:
                        /* we have seen 0x0d (TX_DMA_FAILED err) */
                        scsi_stat = EFCT_SCSI_STATUS_ERROR;
                        break;
                }
                break;

        case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
                /* target IO timed out */
                scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
                break;

        case SLI4_FC_WCQE_STATUS_SHUTDOWN:
                /* Target IO cancelled by HW */
                scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
                break;

        default:
                scsi_stat = EFCT_SCSI_STATUS_ERROR;
                break;
        }

        cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);

        efct_scsi_check_pending(efct);
}

static int
efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
                     struct efct_scsi_sgl *sgl, u32 sgl_count,
                     enum efct_hw_io_type type)
{
        int rc;
        u32 i;
        struct efct *efct = hw->os;

        /* Initialize HW SGL */
        rc = efct_hw_io_init_sges(hw, hio, type);
        if (rc) {
                efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
                return -EIO;
        }

        for (i = 0; i < sgl_count; i++) {
                /* Add data SGE */
                rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
                if (rc) {
                        efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
                                    sgl_count, rc);
                        return rc;
                }
        }

        return 0;
}
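
/*
 * Sketch of feeding efct_scsi_build_sgls(), assuming the caller already owns
 * two DMA-mapped buffers. The dma_addr_t values and lengths here are
 * placeholders; in the driver the efct_scsi_sgl array arrives from the
 * target server via efct_scsi_send_rd_data()/efct_scsi_recv_wr_data().
 */
#if 0
static int example_build_two_sges(struct efct_hw *hw, struct efct_hw_io *hio,
                                  dma_addr_t buf0, dma_addr_t buf1)
{
        struct efct_scsi_sgl sgl[2] = {
                { .addr = buf0, .len = 4096 },
                { .addr = buf1, .len = 2048 },
        };

        /* initializes the HW SGL, then adds one data SGE per entry */
        return efct_scsi_build_sgls(hw, hio, sgl, 2, EFCT_HW_IO_TARGET_READ);
}
#endif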

static void efc_log_sgl(struct efct_io *io)
{
        struct efct_hw_io *hio = io->hio;
        struct sli4_sge *data = NULL;
        u32 *dword = NULL;
        u32 i;
        u32 n_sge;

        scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
                      upper_32_bits(hio->def_sgl.phys),
                      lower_32_bits(hio->def_sgl.phys));
        n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
        for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
                dword = (u32 *)data;

                scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
                              i, dword[0], dword[1], dword[2], dword[3]);

                if (dword[2] & (1U << 31))
                        break;
        }
}

static void
efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
                                 u8 *mqe, void *arg)
{
        struct efct_io *io = arg;

        if (io) {
                efct_hw_done_t cb = io->hw_cb;

                if (!io->hw_cb)
                        return;

                io->hw_cb = NULL;
                (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
        }
}

static int
efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
{
        int rc = 0;
        struct efct *efct = io->efct;

        /* Got a HW IO;
         * update ini/tgt_task_tag with HW IO info and dispatch
         */
        io->hio = hio;
        if (io->cmd_tgt)
                io->tgt_task_tag = hio->indicator;
        else if (io->cmd_ini)
                io->init_task_tag = hio->indicator;
        io->hw_tag = hio->reqtag;

        hio->eq = io->hw_priv;

        /* Copy WQ steering */
        switch (io->wq_steering) {
        case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
                hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
                break;
        case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
                hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
                break;
        case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
                hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
                break;
        }

        switch (io->io_type) {
        case EFCT_IO_TYPE_IO:
                rc = efct_scsi_build_sgls(&efct->hw, io->hio,
                                          io->sgl, io->sgl_count, io->hio_type);
                if (rc)
                        break;

                if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
                        efc_log_sgl(io);

                if (io->app_id)
                        io->iparam.fcp_tgt.app_id = io->app_id;

                io->iparam.fcp_tgt.vpi = io->node->vpi;
                io->iparam.fcp_tgt.rpi = io->node->rpi;
                io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
                io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
                io->iparam.fcp_tgt.xmit_len = io->wire_len;

                rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
                                     &io->iparam, io->hw_cb, io);
                break;
        default:
                scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
                rc = -EIO;
                break;
        }
        return rc;
}
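
/*
 * Sketch of how WQ steering reaches the HW IO: the SCSI-level flags carry a
 * steering value under EFCT_SCSI_WQ_STEERING_MASK, efct_scsi_xfer_data()
 * shifts it into io->wq_steering, and the switch above maps it onto
 * hio->wq_steering. The helper below is hypothetical and only demonstrates
 * the encoding.
 */
#if 0
static void example_request_cpu_steering(struct efct_io *io, u32 *flags)
{
        /* ask for CPU-affinity steering on this transfer */
        *flags |= EFCT_SCSI_WQ_STEERING_CPU;

        /* later, efct_scsi_xfer_data() performs: */
        io->wq_steering = (*flags & EFCT_SCSI_WQ_STEERING_MASK) >>
                                EFCT_SCSI_WQ_STEERING_SHIFT;
}
#endif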

static int
efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
{
        int rc;

        switch (io->io_type) {
        case EFCT_IO_TYPE_ABORT: {
                struct efct_hw_io *hio_to_abort = NULL;

                hio_to_abort = io->io_to_abort->hio;

                if (!hio_to_abort) {
                        /*
                         * If the IO to abort does not have an
                         * associated HW IO, immediately make the callback
                         * with success. The command must have been sent to
                         * the backend, but the data phase has not yet
                         * started, so we don't have a HW IO.
                         *
                         * Note: since the backend shims should be
                         * taking a reference on io_to_abort, it should not
                         * be possible for it to have been completed and
                         * freed by the backend before the abort got here.
                         */
                        scsi_io_printf(io, "IO: not active\n");
                        ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
                                        SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
                        rc = 0;
                        break;
                }

                /* HW IO is valid, abort it */
                scsi_io_printf(io, "aborting\n");
                rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
                                      io->send_abts, io->hw_cb, io);
                if (rc) {
                        int status = SLI4_FC_WCQE_STATUS_SUCCESS;
                        efct_hw_done_t cb = io->hw_cb;

                        if (rc != -ENOENT && rc != -EINPROGRESS) {
                                status = -1;
                                scsi_io_printf(io, "Failed to abort IO rc=%d\n",
                                               rc);
                        }
                        cb(io->hio, 0, status, 0, io);
                        rc = 0;
                }

                break;
        }
        default:
                scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
                rc = -EIO;
                break;
        }
        return rc;
}

static struct efct_io *
efct_scsi_dispatch_pending(struct efct *efct)
{
        struct efct_xport *xport = efct->xport;
        struct efct_io *io = NULL;
        struct efct_hw_io *hio;
        unsigned long flags = 0;
        int status;

        spin_lock_irqsave(&xport->io_pending_lock, flags);

        if (!list_empty(&xport->io_pending_list)) {
                io = list_first_entry(&xport->io_pending_list, struct efct_io,
                                      io_pending_link);
                list_del_init(&io->io_pending_link);
        }

        if (!io) {
                spin_unlock_irqrestore(&xport->io_pending_lock, flags);
                return NULL;
        }

        if (io->io_type == EFCT_IO_TYPE_ABORT) {
                hio = NULL;
        } else {
                hio = efct_hw_io_alloc(&efct->hw);
                if (!hio) {
                        /*
                         * No HW IO available. Put the IO back on
                         * the front of the pending list.
                         */
                        list_add(&io->io_pending_link, &xport->io_pending_list);
                        io = NULL;
                } else {
                        hio->eq = io->hw_priv;
                }
        }

        /* Must drop the lock before dispatching the IO */
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);

        if (!io)
                return NULL;

        /*
         * We pulled an IO off the pending list,
         * and either got an HW IO or don't need one
         */
        atomic_sub_return(1, &xport->io_pending_count);
        if (!hio)
                status = efct_scsi_io_dispatch_no_hw_io(io);
        else
                status = efct_scsi_io_dispatch_hw_io(io, hio);
        if (status) {
                /*
                 * Invoke the HW callback, but do so in the
                 * separate execution context, provided by the
                 * NOP mailbox completion processing context
                 * by using efct_hw_async_call()
                 */
                if (efct_hw_async_call(&efct->hw,
                                       efct_scsi_check_pending_async_cb, io)) {
                        efc_log_debug(efct, "call hw async failed\n");
                }
        }

        return io;
}

void
efct_scsi_check_pending(struct efct *efct)
{
        struct efct_xport *xport = efct->xport;
        struct efct_io *io = NULL;
        int count = 0;
        unsigned long flags = 0;
        int dispatch = 0;

        /* Guard against recursion: only the caller whose increment
         * observes 1 becomes the mainline processor.
         */
        if (atomic_add_return(1, &xport->io_pending_recursing) != 1) {
                /* This function is already running. Decrement and return. */
                atomic_sub_return(1, &xport->io_pending_recursing);
                return;
        }

        while (efct_scsi_dispatch_pending(efct))
                count++;

        if (count) {
                atomic_sub_return(1, &xport->io_pending_recursing);
                return;
        }

        /*
         * If nothing was removed from the list,
         * we might be in a case where we need to abort an
         * active IO and the abort is on the pending list.
         * Look for an abort we can dispatch.
         */

        spin_lock_irqsave(&xport->io_pending_lock, flags);

        list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
                if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
                        /* This IO has a HW IO, so it is
                         * active. Dispatch the abort.
                         */
                        dispatch = 1;
                        list_del_init(&io->io_pending_link);
                        atomic_sub_return(1, &xport->io_pending_count);
                        break;
                }
        }

        spin_unlock_irqrestore(&xport->io_pending_lock, flags);

        if (dispatch) {
                if (efct_scsi_io_dispatch_no_hw_io(io)) {
                        if (efct_hw_async_call(&efct->hw,
                                efct_scsi_check_pending_async_cb, io)) {
                                efc_log_debug(efct, "hw async failed\n");
                        }
                }
        }

        atomic_sub_return(1, &xport->io_pending_recursing);
}
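
/*
 * The guard above is a common pattern: an atomic counter acting as a
 * non-blocking "already running" latch. A minimal standalone sketch with a
 * hypothetical worker; only the first caller whose increment observes 1
 * proceeds, everyone else backs out.
 */
#if 0
static atomic_t example_busy = ATOMIC_INIT(0);

static void example_guarded_worker(void)
{
        if (atomic_add_return(1, &example_busy) != 1) {
                atomic_sub_return(1, &example_busy);    /* someone else runs */
                return;
        }

        /* ... do the work, exactly one instance at a time ... */

        atomic_sub_return(1, &example_busy);
}
#endif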

int
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
{
        struct efct_hw_io *hio;
        struct efct *efct = io->efct;
        struct efct_xport *xport = efct->xport;
        unsigned long flags = 0;

        io->hw_cb = cb;

        /*
         * If this IO already has a HW IO, then this is not the first
         * phase of the IO. Send it to the HW.
         */
        if (io->hio)
                return efct_scsi_io_dispatch_hw_io(io, io->hio);

        /*
         * We don't already have a HW IO associated with the IO. First check
         * the pending list. If not empty, add IO to the tail and process the
         * pending list.
         */
        spin_lock_irqsave(&xport->io_pending_lock, flags);
        if (!list_empty(&xport->io_pending_list)) {
                /*
                 * If this is a low latency request, put it at the front
                 * of the IO pending queue; otherwise put it at the end.
                 */
                if (io->low_latency) {
                        INIT_LIST_HEAD(&io->io_pending_link);
                        list_add(&io->io_pending_link, &xport->io_pending_list);
                } else {
                        INIT_LIST_HEAD(&io->io_pending_link);
                        list_add_tail(&io->io_pending_link,
                                      &xport->io_pending_list);
                }
                spin_unlock_irqrestore(&xport->io_pending_lock, flags);
                atomic_add_return(1, &xport->io_pending_count);
                atomic_add_return(1, &xport->io_total_pending);

                /* process pending list */
                efct_scsi_check_pending(efct);
                return 0;
        }
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);

        /*
         * We don't have a HW IO associated with the IO and there's nothing
         * on the pending list. Attempt to allocate a HW IO and dispatch it.
         */
        hio = efct_hw_io_alloc(&io->efct->hw);
        if (!hio) {
                /* Couldn't get a HW IO. Save this IO on the pending list */
                spin_lock_irqsave(&xport->io_pending_lock, flags);
                INIT_LIST_HEAD(&io->io_pending_link);
                list_add_tail(&io->io_pending_link, &xport->io_pending_list);
                spin_unlock_irqrestore(&xport->io_pending_lock, flags);

                atomic_add_return(1, &xport->io_total_pending);
                atomic_add_return(1, &xport->io_pending_count);
                return 0;
        }

        /* We successfully allocated a HW IO; dispatch to HW */
        return efct_scsi_io_dispatch_hw_io(io, hio);
}
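
/*
 * Observability sketch (hypothetical helper): the dispatch path above keeps
 * three atomics in the transport that are useful when diagnosing HW IO
 * exhaustion; all three fields are updated in this file.
 */
#if 0
static void example_dump_pending_stats(struct efct *efct)
{
        struct efct_xport *xport = efct->xport;

        efc_log_debug(efct, "pending=%d total_pending=%d alloc_failed=%d\n",
                      atomic_read(&xport->io_pending_count),
                      atomic_read(&xport->io_total_pending),
                      atomic_read(&xport->io_alloc_failed_count));
}
#endif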

int
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
{
        struct efct *efct = io->efct;
        struct efct_xport *xport = efct->xport;
        unsigned long flags = 0;

        io->hw_cb = cb;

        /*
         * For aborts, we don't need a HW IO, but we still want
         * to pass through the pending list to preserve ordering.
         * Thus, if the pending list is not empty, add this abort
         * to the pending list and process the pending list.
         */
        spin_lock_irqsave(&xport->io_pending_lock, flags);
        if (!list_empty(&xport->io_pending_list)) {
                INIT_LIST_HEAD(&io->io_pending_link);
                list_add_tail(&io->io_pending_link, &xport->io_pending_list);
                spin_unlock_irqrestore(&xport->io_pending_lock, flags);
                atomic_add_return(1, &xport->io_pending_count);
                atomic_add_return(1, &xport->io_total_pending);

                /* process pending list */
                efct_scsi_check_pending(efct);
                return 0;
        }
        spin_unlock_irqrestore(&xport->io_pending_lock, flags);

        /* nothing on pending list, dispatch abort */
        return efct_scsi_io_dispatch_no_hw_io(io);
}

static inline int
efct_scsi_xfer_data(struct efct_io *io, u32 flags,
                    struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
                    enum efct_hw_io_type type, int enable_ar,
                    efct_scsi_io_cb_t cb, void *arg)
{
        struct efct *efct;
        size_t residual = 0;

        io->sgl_count = sgl_count;

        efct = io->efct;

        scsi_io_trace(io, "%s wire_len %llu\n",
                      (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
                      xwire_len);

        io->hio_type = type;

        io->scsi_tgt_cb = cb;
        io->scsi_tgt_cb_arg = arg;

        residual = io->exp_xfer_len - io->transferred;
        io->wire_len = (xwire_len < residual) ? xwire_len : residual;
        residual = (xwire_len - io->wire_len);

        memset(&io->iparam, 0, sizeof(io->iparam));
        io->iparam.fcp_tgt.ox_id = io->init_task_tag;
        io->iparam.fcp_tgt.offset = io->transferred;
        io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
        io->iparam.fcp_tgt.timeout = io->timeout;

        /* if this is the last data phase and there is no residual, enable
         * auto-good-response
         */
        if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
            ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
            (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
                io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
                io->auto_resp = true;
        } else {
                io->auto_resp = false;
        }

        /* save this transfer length */
        io->xfer_req = io->wire_len;

        /* Adjust the transferred count to account for overrun
         * when the residual is calculated in efct_scsi_send_resp
         */
        io->transferred += residual;

        /* Adjust the SGL size if there is overrun */
        if (residual) {
                struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];

                while (residual) {
                        size_t len = sgl_ptr->len;

                        if (len > residual) {
                                sgl_ptr->len = len - residual;
                                residual = 0;
                        } else {
                                sgl_ptr->len = 0;
                                residual -= len;
                                io->sgl_count--;
                        }
                        sgl_ptr--;
                }
        }

        /* Set latency and WQ steering */
        io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
        io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
                                EFCT_SCSI_WQ_STEERING_SHIFT;
        io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
                                EFCT_SCSI_WQ_CLASS_SHIFT;

        if (efct->xport) {
                struct efct_xport *xport = efct->xport;

                if (type == EFCT_HW_IO_TARGET_READ) {
                        xport->fcp_stats.input_requests++;
                        xport->fcp_stats.input_bytes += xwire_len;
                } else if (type == EFCT_HW_IO_TARGET_WRITE) {
                        xport->fcp_stats.output_requests++;
                        xport->fcp_stats.output_bytes += xwire_len;
                }
        }
        return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
                       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
                       efct_scsi_io_cb_t cb, void *arg)
{
        return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
                                   len, EFCT_HW_IO_TARGET_READ,
                                   enable_tsend_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
                       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
                       efct_scsi_io_cb_t cb, void *arg)
{
        return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
                                   EFCT_HW_IO_TARGET_WRITE,
                                   enable_treceive_auto_resp(io->efct), cb, arg);
}
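
/*
 * Backend usage sketch for a read command (target-to-initiator data). All
 * "example_" names are hypothetical, and the callback return type is assumed
 * from efct_scsi_io_cb_t usage in this file. With EFCT_SCSI_LAST_DATAPHASE
 * set and tsend auto-response enabled, a clean completion arrives with
 * EFCT_SCSI_IO_CMPL_RSP_SENT and no explicit response is needed.
 */
#if 0
static int example_resp_done(struct efct_io *io,
                             enum efct_scsi_io_status scsi_status,
                             u32 flags, void *arg)
{
        efct_scsi_io_complete(io);      /* drop the backend's reference */
        return 0;
}

static int example_rd_done(struct efct_io *io,
                           enum efct_scsi_io_status scsi_status,
                           u32 flags, void *arg)
{
        if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT)
                efct_scsi_io_complete(io);      /* response already on the wire */
        else
                efct_scsi_send_resp(io, 0, NULL, example_resp_done, arg);
        return 0;
}

static int example_send_read_data(struct efct_io *io, dma_addr_t data, u32 len)
{
        struct efct_scsi_sgl sgl = { .addr = data, .len = len };

        return efct_scsi_send_rd_data(io, EFCT_SCSI_LAST_DATAPHASE,
                                      &sgl, 1, len, example_rd_done, NULL);
}
#endif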

int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
                    struct efct_scsi_cmd_resp *rsp,
                    efct_scsi_io_cb_t cb, void *arg)
{
        struct efct *efct;
        int residual;
        /* Always try auto resp */
        bool auto_resp = true;
        u8 scsi_status = 0;
        u16 scsi_status_qualifier = 0;
        u8 *sense_data = NULL;
        u32 sense_data_length = 0;

        efct = io->efct;

        if (rsp) {
                scsi_status = rsp->scsi_status;
                scsi_status_qualifier = rsp->scsi_status_qualifier;
                sense_data = rsp->sense_data;
                sense_data_length = rsp->sense_data_length;
                residual = rsp->residual;
        } else {
                residual = io->exp_xfer_len - io->transferred;
        }

        io->wire_len = 0;
        io->hio_type = EFCT_HW_IO_TARGET_RSP;

        io->scsi_tgt_cb = cb;
        io->scsi_tgt_cb_arg = arg;

        memset(&io->iparam, 0, sizeof(io->iparam));
        io->iparam.fcp_tgt.ox_id = io->init_task_tag;
        io->iparam.fcp_tgt.offset = 0;
        io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
        io->iparam.fcp_tgt.timeout = io->timeout;

        /* Set low latency queueing request */
        io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
        io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
                                EFCT_SCSI_WQ_STEERING_SHIFT;
        io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
                                EFCT_SCSI_WQ_CLASS_SHIFT;

        if (scsi_status != 0 || residual || sense_data_length) {
                struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
                u8 *sns_data;

                if (!fcprsp) {
                        efc_log_err(efct, "NULL response buffer\n");
                        return -EIO;
                }

                sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);

                auto_resp = false;

                memset(fcprsp, 0, sizeof(*fcprsp));

                io->wire_len += sizeof(*fcprsp);

                fcprsp->resp.fr_status = scsi_status;
                fcprsp->resp.fr_retry_delay =
                        cpu_to_be16(scsi_status_qualifier);

                /* set residual status if necessary */
                if (residual != 0) {
                        /* FCP: if the data transferred is less than the
                         * amount expected, then this is an underflow.
                         * If the data transferred would have been greater
                         * than the amount expected, this is an overflow.
                         */
                        if (residual > 0) {
                                fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
                                fcprsp->ext.fr_resid = cpu_to_be32(residual);
                        } else {
                                fcprsp->resp.fr_flags |= FCP_RESID_OVER;
                                fcprsp->ext.fr_resid = cpu_to_be32(-residual);
                        }
                }

                if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
                        if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
                                efc_log_err(efct, "Sense exceeds max size.\n");
                                return -EIO;
                        }

                        fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
                        memcpy(sns_data, sense_data, sense_data_length);
                        fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
                        io->wire_len += sense_data_length;
                }

                io->sgl[0].addr = io->rspbuf.phys;
                io->sgl[0].dif_addr = 0;
                io->sgl[0].len = io->wire_len;
                io->sgl_count = 1;
        }

        if (auto_resp)
                io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;

        return efct_scsi_io_dispatch(io, efct_target_io_cb);
}
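
/*
 * Sketch of a CHECK CONDITION response. The fixed-format sense buffer is
 * built by hand here (ILLEGAL REQUEST / INVALID FIELD IN CDB); everything
 * else comes straight from struct efct_scsi_cmd_resp as consumed above.
 * The "example_" helper is hypothetical.
 */
#if 0
static int example_send_check_condition(struct efct_io *io,
                                        efct_scsi_io_cb_t cb, void *arg)
{
        struct efct_scsi_cmd_resp rsp = { 0 };
        u8 sense[18] = { 0 };

        sense[0] = 0x70;        /* fixed format, current error */
        sense[2] = 0x05;        /* sense key: ILLEGAL REQUEST */
        sense[7] = 10;          /* additional sense length */
        sense[12] = 0x24;       /* ASC: INVALID FIELD IN CDB */

        rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
        rsp.sense_data = sense;
        rsp.sense_data_length = sizeof(sense);

        /* sense bytes are copied into io->rspbuf before dispatch returns */
        return efct_scsi_send_resp(io, 0, &rsp, cb, arg);
}
#endif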

static int
efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
                        u32 ext_status, void *app)
{
        struct efct_io *io = app;
        struct efct *efct;
        enum efct_scsi_io_status bls_status;

        efct = io->efct;

        /* BLS isn't really a "SCSI" concept, but use SCSI status */
        if (status) {
                io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
                bls_status = EFCT_SCSI_STATUS_ERROR;
        } else {
                bls_status = EFCT_SCSI_STATUS_GOOD;
        }

        if (io->bls_cb) {
                efct_scsi_io_cb_t bls_cb = io->bls_cb;
                void *bls_cb_arg = io->bls_cb_arg;

                io->bls_cb = NULL;
                io->bls_cb_arg = NULL;

                /* invoke callback */
                bls_cb(io, bls_status, 0, bls_cb_arg);
        }

        efct_scsi_check_pending(efct);
        return 0;
}

static int
efct_target_send_bls_resp(struct efct_io *io,
                          efct_scsi_io_cb_t cb, void *arg)
{
        struct efct_node *node = io->node;
        struct sli_bls_params *bls = &io->iparam.bls;
        struct efct *efct = node->efct;
        struct fc_ba_acc *acc;
        int rc;

        /* fill out IO structure with everything needed to send BA_ACC */
        memset(&io->iparam, 0, sizeof(io->iparam));
        bls->ox_id = io->init_task_tag;
        bls->rx_id = io->abort_rx_id;
        bls->vpi = io->node->vpi;
        bls->rpi = io->node->rpi;
        bls->s_id = U32_MAX;
        bls->d_id = io->node->node_fc_id;
        bls->rpi_registered = true;

        acc = (void *)bls->payload;
        acc->ba_ox_id = cpu_to_be16(bls->ox_id);
        acc->ba_rx_id = cpu_to_be16(bls->rx_id);
        acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

        /* generic io fields have already been populated */

        /* set type and BLS-specific fields */
        io->io_type = EFCT_IO_TYPE_BLS_RESP;
        io->display_name = "bls_rsp";
        io->hio_type = EFCT_HW_BLS_ACC;
        io->bls_cb = cb;
        io->bls_cb_arg = arg;

        /* dispatch IO */
        rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
                              efct_target_bls_resp_cb, io);
        return rc;
}

static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
                                u32 ext_status, void *app)
{
        struct efct_io *io = app;

        efct_scsi_io_free(io);
        return 0;
}

struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
{
        struct efct_node *node = io->node;
        struct sli_bls_params *bls = &io->iparam.bls;
        struct efct *efct = node->efct;
        struct fc_ba_rjt *acc;
        int rc;

        /* fill out BLS Response-specific fields */
        io->io_type = EFCT_IO_TYPE_BLS_RESP;
        io->display_name = "ba_rjt";
        io->hio_type = EFCT_HW_BLS_RJT;
        io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);

        /* fill out iparam fields */
        memset(&io->iparam, 0, sizeof(io->iparam));
        bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
        bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
        bls->vpi = io->node->vpi;
        bls->rpi = io->node->rpi;
        bls->s_id = U32_MAX;
        bls->d_id = io->node->node_fc_id;
        bls->rpi_registered = true;

        acc = (void *)bls->payload;
        acc->br_reason = ELS_RJT_UNAB;
        acc->br_explan = ELS_EXPL_NONE;

        rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
                              io);
        if (rc) {
                efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
                efct_scsi_io_free(io);
                io = NULL;
        }
        return io;
}

int
efct_scsi_send_tmf_resp(struct efct_io *io,
                        enum efct_scsi_tmf_resp rspcode,
                        u8 addl_rsp_info[3],
                        efct_scsi_io_cb_t cb, void *arg)
{
        int rc;
        struct {
                struct fcp_resp_with_ext rsp_ext;
                struct fcp_resp_rsp_info info;
        } *fcprsp;
        u8 fcp_rspcode;

        io->wire_len = 0;

        switch (rspcode) {
        case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
                fcp_rspcode = FCP_TMF_CMPL;
                break;
        case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
        case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
                fcp_rspcode = FCP_TMF_CMPL;
                break;
        case EFCT_SCSI_TMF_FUNCTION_REJECTED:
                fcp_rspcode = FCP_TMF_REJECTED;
                break;
        case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
                fcp_rspcode = FCP_TMF_INVALID_LUN;
                break;
        case EFCT_SCSI_TMF_SERVICE_DELIVERY:
                fcp_rspcode = FCP_TMF_FAILED;
                break;
        default:
                fcp_rspcode = FCP_TMF_REJECTED;
                break;
        }

        io->hio_type = EFCT_HW_IO_TARGET_RSP;

        io->scsi_tgt_cb = cb;
        io->scsi_tgt_cb_arg = arg;

        if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
                rc = efct_target_send_bls_resp(io, cb, arg);
                return rc;
        }

        /* populate the FCP TMF response */
        fcprsp = io->rspbuf.virt;
        memset(fcprsp, 0, sizeof(*fcprsp));

        fcprsp->rsp_ext.resp.fr_flags |= FCP_SNS_LEN_VAL;

        if (addl_rsp_info) {
                memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
                       sizeof(fcprsp->info._fr_resvd));
        }
        fcprsp->info.rsp_code = fcp_rspcode;

        io->wire_len = sizeof(*fcprsp);

        fcprsp->rsp_ext.ext.fr_rsp_len =
                        cpu_to_be32(sizeof(struct fcp_resp_rsp_info));

        io->sgl[0].addr = io->rspbuf.phys;
        io->sgl[0].dif_addr = 0;
        io->sgl[0].len = io->wire_len;
        io->sgl_count = 1;

        memset(&io->iparam, 0, sizeof(io->iparam));
        io->iparam.fcp_tgt.ox_id = io->init_task_tag;
        io->iparam.fcp_tgt.offset = 0;
        io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
        io->iparam.fcp_tgt.timeout = io->timeout;

        rc = efct_scsi_io_dispatch(io, efct_target_io_cb);

        return rc;
}
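
/*
 * TMF usage sketch: on a LOGICAL UNIT RESET the backend would quiesce the
 * LUN and then answer with FUNCTION COMPLETE. The helper and its callback
 * are hypothetical; the callback return type follows efct_scsi_io_cb_t
 * usage in this file.
 */
#if 0
static int example_tmf_done(struct efct_io *io,
                            enum efct_scsi_io_status scsi_status,
                            u32 flags, void *arg)
{
        efct_scsi_io_complete(io);
        return 0;
}

static int example_complete_lun_reset(struct efct_io *tmf_io)
{
        /* ... abort/cleanup of affected commands happens first ... */
        return efct_scsi_send_tmf_resp(tmf_io,
                                       EFCT_SCSI_TMF_FUNCTION_COMPLETE,
                                       NULL, example_tmf_done, NULL);
}
#endif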

static int
efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
                     u32 ext_status, void *app)
{
        struct efct_io *io = app;
        struct efct *efct;
        enum efct_scsi_io_status scsi_status;
        efct_scsi_io_cb_t abort_cb;
        void *abort_cb_arg;

        efct = io->efct;

        if (!io->abort_cb)
                goto done;

        abort_cb = io->abort_cb;
        abort_cb_arg = io->abort_cb_arg;

        io->abort_cb = NULL;
        io->abort_cb_arg = NULL;

        switch (status) {
        case SLI4_FC_WCQE_STATUS_SUCCESS:
                scsi_status = EFCT_SCSI_STATUS_GOOD;
                break;
        case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
                switch (ext_status) {
                case SLI4_FC_LOCAL_REJECT_NO_XRI:
                        scsi_status = EFCT_SCSI_STATUS_NO_IO;
                        break;
                case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
                        scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
                        break;
                default:
                        /* we have seen 0x15 (abort in progress) */
                        scsi_status = EFCT_SCSI_STATUS_ERROR;
                        break;
                }
                break;
        case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
                scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
                break;
        default:
                scsi_status = EFCT_SCSI_STATUS_ERROR;
                break;
        }
        /* invoke callback */
        abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);

done:
        /* done with IO to abort; efct_ref_get(): efct_scsi_tgt_abort_io() */
        kref_put(&io->io_to_abort->ref, io->io_to_abort->release);

        efct_io_pool_io_free(efct->xport->io_pool, io);

        efct_scsi_check_pending(efct);
        return 0;
}

int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
{
        struct efct *efct;
        struct efct_xport *xport;
        int rc;
        struct efct_io *abort_io = NULL;

        efct = io->efct;
        xport = efct->xport;

        /* take a reference on IO being aborted */
        if (kref_get_unless_zero(&io->ref) == 0) {
                /* command no longer active */
                scsi_io_printf(io, "command no longer active\n");
                return -EIO;
        }

        /*
         * Allocate a new IO to send the abort request. Use
         * efct_io_pool_io_alloc() directly, as we need an IO object that
         * will not fail allocation due to allocations being disabled
         * (as they can be in efct_scsi_io_alloc()).
         */
        abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
        if (!abort_io) {
                atomic_add_return(1, &xport->io_alloc_failed_count);
                kref_put(&io->ref, io->release);
                return -EIO;
        }

        /* Save the target server callback and argument */
        /* set generic fields */
        abort_io->cmd_tgt = true;
        abort_io->node = io->node;

        /* set type and abort-specific fields */
        abort_io->io_type = EFCT_IO_TYPE_ABORT;
        abort_io->display_name = "tgt_abort";
        abort_io->io_to_abort = io;
        abort_io->send_abts = false;
        abort_io->abort_cb = cb;
        abort_io->abort_cb_arg = arg;

        /* now dispatch IO */
        rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
        if (rc)
                kref_put(&io->ref, io->release);
        return rc;
}
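
/*
 * Abort usage sketch: a backend that wants to cancel an outstanding data
 * transfer calls efct_scsi_tgt_abort_io() on the IO it handed out earlier.
 * Hypothetical names; note the callback receives the aborted IO
 * (io_to_abort), not the internal abort IO.
 */
#if 0
static int example_abort_done(struct efct_io *io,
                              enum efct_scsi_io_status scsi_status,
                              u32 flags, void *arg)
{
        if (scsi_status != EFCT_SCSI_STATUS_GOOD)
                scsi_io_printf(io, "abort finished with status %d\n",
                               scsi_status);
        return 0;
}

static void example_cancel_io(struct efct_io *io)
{
        if (efct_scsi_tgt_abort_io(io, example_abort_done, NULL))
                scsi_io_printf(io, "abort could not be issued\n");
}
#endif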

void
efct_scsi_io_complete(struct efct_io *io)
{
        if (io->io_free) {
                efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
                              io->tag);
                return;
        }

        scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
        kref_put(&io->ref, io->release);
}