linux/drivers/scsi/elx/libefc/efc_node.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efc.h"

int
efc_remote_node_cb(void *arg, int event, void *data)
{
        struct efc *efc = arg;
        struct efc_remote_node *rnode = data;
        struct efc_node *node = rnode->node;
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->lock, flags);
        efc_node_post_event(node, event, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);

        return 0;
}

struct efc_node *
efc_node_find(struct efc_nport *nport, u32 port_id)
{
        /* Find an FC node structure given the FC port ID */
        return xa_load(&nport->lookup, port_id);
}

static void
_efc_node_free(struct kref *arg)
{
        struct efc_node *node = container_of(arg, struct efc_node, ref);
        struct efc *efc = node->efc;
        struct efc_dma *dma;

        dma = &node->sparm_dma_buf;
        dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
        memset(dma, 0, sizeof(struct efc_dma));
        mempool_free(node, efc->node_pool);
}

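/*
 * Allocate and initialize a node for the given port_id on an nport.
 * The node object and its service-parameters buffer come from the efc
 * memory pools (GFP_ATOMIC), the node is registered in the nport lookup
 * xarray, and a reference is taken on the parent nport. Returns NULL on
 * any failure after unwinding the partial allocation.
 */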
struct efc_node *efc_node_alloc(struct efc_nport *nport,
                                u32 port_id, bool init, bool targ)
{
        int rc;
        struct efc_node *node = NULL;
        struct efc *efc = nport->efc;
        struct efc_dma *dma;

        if (nport->shutting_down) {
                efc_log_debug(efc, "node allocation when shutting down %06x",
                              port_id);
                return NULL;
        }

        node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
        if (!node) {
                efc_log_err(efc, "node allocation failed %06x", port_id);
                return NULL;
        }
        memset(node, 0, sizeof(*node));

        dma = &node->sparm_dma_buf;
        dma->size = NODE_SPARAMS_SIZE;
        dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
        if (!dma->virt) {
                efc_log_err(efc, "node dma alloc failed\n");
                goto dma_fail;
        }
        node->rnode.indicator = U32_MAX;
        node->nport = nport;

        node->efc = efc;
        node->init = init;
        node->targ = targ;

        spin_lock_init(&node->pend_frames_lock);
        INIT_LIST_HEAD(&node->pend_frames);
        spin_lock_init(&node->els_ios_lock);
        INIT_LIST_HEAD(&node->els_ios_list);
        node->els_io_enabled = true;

        rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
        if (rc) {
                efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
                goto hw_alloc_fail;
        }

        node->rnode.node = node;
        node->sm.app = node;
        node->evtdepth = 0;

        efc_node_update_display_name(node);

        rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
        if (rc) {
                efc_log_err(efc, "Node lookup store failed: %d\n", rc);
                goto xa_fail;
        }

        /* initialize refcount */
        kref_init(&node->ref);
        node->release = _efc_node_free;
        kref_get(&nport->ref);

        return node;

xa_fail:
        efc_node_free_resources(efc, &node->rnode);
hw_alloc_fail:
        dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
dma_fail:
        mempool_free(node, efc->node_pool);
        return NULL;
}

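/*
 * Tear down a node: release its HW resources, remove it from the nport
 * lookup xarray, and drop the node and nport references. If this was the
 * last node on the nport, EFC_EVT_ALL_CHILD_NODES_FREE is posted to the
 * nport state machine. A node that was re-found during deletion triggers
 * a fake RSCN event to the name server node afterwards.
 */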
void
efc_node_free(struct efc_node *node)
{
        struct efc_nport *nport;
        struct efc *efc;
        int rc = 0;
        struct efc_node *ns = NULL;

        nport = node->nport;
        efc = node->efc;

        node_printf(node, "Free'd\n");

        if (node->refound) {
                /*
                 * Save the name server node. We will send fake RSCN event at
                 * the end to handle ignored RSCN event during node deletion
                 */
                ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
        }

        if (!node->nport) {
                efc_log_err(efc, "Node already Freed\n");
                return;
        }

        /* Free HW resources */
        rc = efc_node_free_resources(efc, &node->rnode);
        if (rc < 0)
                efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);

        /* if the gidpt_delay_timer is still running, then delete it */
        if (timer_pending(&node->gidpt_delay_timer))
                del_timer(&node->gidpt_delay_timer);

        xa_erase(&nport->lookup, node->rnode.fc_id);

        /*
         * If the node_list is empty,
         * then post a ALL_CHILD_NODES_FREE event to the nport,
         * after the lock is released.
         * The nport may be free'd as a result of the event.
         */
        if (xa_empty(&nport->lookup))
                efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
                                  NULL);

        node->nport = NULL;
        node->sm.current_state = NULL;

        kref_put(&nport->ref, nport->release);
        kref_put(&node->ref, node->release);

        if (ns) {
                /* sending fake RSCN event to name server node */
                efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
        }
}

static void
efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
{
        if (!dma || !buffer || !buffer_length)
                return;

        if (buffer_length > dma->size)
                buffer_length = dma->size;

        memcpy(dma->virt, buffer, buffer_length);
        dma->len = buffer_length;
}

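/*
 * Attach the node to the hardware using the service parameters received
 * in PLOGI/FLOGI, copied into the node's DMA buffer with the first four
 * bytes (the ELS command word) skipped. Requires the domain to already
 * be attached.
 */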
int
efc_node_attach(struct efc_node *node)
{
        int rc = 0;
        struct efc_nport *nport = node->nport;
        struct efc_domain *domain = nport->domain;
        struct efc *efc = node->efc;

        if (!domain->attached) {
                efc_log_err(efc, "Warning: unattached domain\n");
                return -EIO;
        }
        /* Update node->wwpn/wwnn */

        efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
                                efc_node_get_wwpn(node));
        efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
                                efc_node_get_wwnn(node));

        efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
                        sizeof(node->service_params) - 4);

        /* take lock to protect node->rnode.attached */
        rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
        if (rc < 0)
                efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);

        return rc;
}

void
efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
{
        switch (fc_id) {
        case FC_FID_FLOGI:
                snprintf(buffer, buffer_length, "fabric");
                break;
        case FC_FID_FCTRL:
                snprintf(buffer, buffer_length, "fabctl");
                break;
        case FC_FID_DIR_SERV:
                snprintf(buffer, buffer_length, "nserve");
                break;
        default:
                if (fc_id == FC_FID_DOM_MGR) {
                        snprintf(buffer, buffer_length, "dctl%02x",
                                 (fc_id & 0x0000ff));
                } else {
                        snprintf(buffer, buffer_length, "%06x", fc_id);
                }
                break;
        }
}

void
efc_node_update_display_name(struct efc_node *node)
{
        u32 port_id = node->rnode.fc_id;
        struct efc_nport *nport = node->nport;
        char portid_display[16];

        efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));

        snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
                 nport->display_name, portid_display);
}

void
efc_node_send_ls_io_cleanup(struct efc_node *node)
{
        if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
                efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
                              node->display_name, node->ls_acc_oxid);

                node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
                node->ls_acc_io = NULL;
        }
}

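/*
 * Shutdown-reason helpers for __efc_node_shutdown(): an implicit logout
 * (PLOGI received while already logged in) re-attaches the node with the
 * same HW resources, while an explicit logout may keep the node alive if
 * frames are still pending or re-authentication with the target is needed.
 */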
static void efc_node_handle_implicit_logo(struct efc_node *node)
{
        int rc;

        /*
         * currently, only case for implicit logo is PLOGI
         * recvd. Thus, node's ELS IO pending list won't be
         * empty (PLOGI will be on it)
         */
        WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
        node_printf(node, "Reason: implicit logout, re-authenticate\n");

        /* Re-attach node with the same HW node resources */
        node->req_free = false;
        rc = efc_node_attach(node);
        efc_node_transition(node, __efc_d_wait_node_attach, NULL);
        node->els_io_enabled = true;

        if (rc < 0)
                efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
}

static void efc_node_handle_explicit_logo(struct efc_node *node)
{
        s8 pend_frames_empty;
        unsigned long flags = 0;

        /* cleanup any pending LS_ACC ELSs */
        efc_node_send_ls_io_cleanup(node);

        spin_lock_irqsave(&node->pend_frames_lock, flags);
        pend_frames_empty = list_empty(&node->pend_frames);
        spin_unlock_irqrestore(&node->pend_frames_lock, flags);

        /*
         * there are two scenarios where we want to keep
         * this node alive:
         * 1. there are pending frames that need to be
         *    processed or
         * 2. we're an initiator and the remote node is
         *    a target and we need to re-authenticate
         */
        node_printf(node,
                    "Shutdown: explicit logo pend=%d nport.ini=%d node.tgt=%d\n",
                    !pend_frames_empty, node->nport->enable_ini, node->targ);
        if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
                u8 send_plogi = false;

                if (node->nport->enable_ini && node->targ) {
                        /*
                         * we're an initiator and
                         * node shutting down is a target;
                         * we'll need to re-authenticate in
                         * initial state
                         */
                        send_plogi = true;
                }

                /*
                 * transition to __efc_d_init
                 * (will retain HW node resources)
                 */
                node->els_io_enabled = true;
                node->req_free = false;

                /*
                 * either pending frames exist or we are re-authenticating
                 * with PLOGI (or both); in either case, return to initial
                 * state
                 */
                efc_node_init_device(node, send_plogi);
        }
        /* else: let node shutdown occur */
}

static void
efc_node_purge_pending(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_hw_sequence *frame, *next;
        unsigned long flags = 0;

        spin_lock_irqsave(&node->pend_frames_lock, flags);

        list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
                list_del(&frame->list_entry);
                efc->tt.hw_seq_free(efc, frame);
        }

        spin_unlock_irqrestore(&node->pend_frames_lock, flags);
}

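/*
 * Node state: shutdown. On entry, act on node->shutdown_reason: re-attach
 * for an implicit logout, possibly keep the node alive for an explicit
 * logout, or purge pending frames and free the node in the default case.
 */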
void
__efc_node_shutdown(struct efc_sm_ctx *ctx,
                    enum efc_sm_event evt, void *arg)
{
        struct efc_node *node = ctx->app;

        efc_node_evt_set(ctx, evt, __func__);

        node_sm_trace();

        switch (evt) {
        case EFC_EVT_ENTER: {
                efc_node_hold_frames(node);
                WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
                /* by default, we will be freeing node after we unwind */
                node->req_free = true;

                switch (node->shutdown_reason) {
                case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
                        /* Node shutdown b/c of PLOGI received when node
                         * already logged in. We have PLOGI service
                         * parameters, so submit node attach; we won't be
                         * freeing this node
                         */

                        efc_node_handle_implicit_logo(node);
                        break;

                case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
                        efc_node_handle_explicit_logo(node);
                        break;

                case EFC_NODE_SHUTDOWN_DEFAULT:
                default: {
                        /*
                         * shutdown due to link down,
                         * node going away (xport event) or
                         * nport shutdown, purge pending and
                         * proceed to cleanup node
                         */

                        /* cleanup any pending LS_ACC ELSs */
                        efc_node_send_ls_io_cleanup(node);

                        node_printf(node,
                                    "Shutdown reason: default, purge pending\n");
                        efc_node_purge_pending(node);
                        break;
                }
                }

                break;
        }
        case EFC_EVT_EXIT:
                efc_node_accept_frames(node);
                break;

        default:
                __efc_node_common(__func__, ctx, evt, arg);
        }
}

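/*
 * Returns true and advances the shutdown state machine once all ELS
 * requests and completions have quiesced: to wait_ios_shutdown if the HW
 * node is already detached, otherwise to wait_node_free.
 */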
static bool
efc_node_check_els_quiesced(struct efc_node *node)
{
        /* check to see if ELS requests, completions are quiesced */
        if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
            efc_els_io_list_empty(node, &node->els_ios_list)) {
                if (!node->attached) {
                        /* hw node detach already completed, proceed */
                        node_printf(node, "HW node not attached\n");
                        efc_node_transition(node,
                                            __efc_node_wait_ios_shutdown,
                                            NULL);
                } else {
                        /*
                         * hw node detach hasn't completed,
                         * transition and wait
                         */
                        node_printf(node, "HW node still attached\n");
                        efc_node_transition(node, __efc_node_wait_node_free,
                                            NULL);
                }
                return true;
        }
        return false;
}

void
efc_node_initiate_cleanup(struct efc_node *node)
{
        /*
         * if ELS's have already been quiesced, will move to next state
         * if ELS's have not been quiesced, abort them
         */
        if (!efc_node_check_els_quiesced(node)) {
                efc_node_hold_frames(node);
                efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
        }
}

void
__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
                             enum efc_sm_event evt, void *arg)
{
        bool check_quiesce = false;
        struct efc_node *node = ctx->app;

        efc_node_evt_set(ctx, evt, __func__);

        node_sm_trace();
        /* Node state machine: Wait for all ELSs to complete */
        switch (evt) {
        case EFC_EVT_ENTER:
                efc_node_hold_frames(node);
                if (efc_els_io_list_empty(node, &node->els_ios_list)) {
                        node_printf(node, "All ELS IOs complete\n");
                        check_quiesce = true;
                }
                break;
        case EFC_EVT_EXIT:
                efc_node_accept_frames(node);
                break;

        case EFC_EVT_SRRS_ELS_REQ_OK:
        case EFC_EVT_SRRS_ELS_REQ_FAIL:
        case EFC_EVT_SRRS_ELS_REQ_RJT:
        case EFC_EVT_ELS_REQ_ABORTED:
                if (WARN_ON(!node->els_req_cnt))
                        break;
                node->els_req_cnt--;
                check_quiesce = true;
                break;

        case EFC_EVT_SRRS_ELS_CMPL_OK:
        case EFC_EVT_SRRS_ELS_CMPL_FAIL:
                if (WARN_ON(!node->els_cmpl_cnt))
                        break;
                node->els_cmpl_cnt--;
                check_quiesce = true;
                break;

        case EFC_EVT_ALL_CHILD_NODES_FREE:
                /* all ELS IO's complete */
                node_printf(node, "All ELS IOs complete\n");
                WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
                check_quiesce = true;
                break;

        case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
                check_quiesce = true;
                break;

        case EFC_EVT_DOMAIN_ATTACH_OK:
                /* don't care about domain_attach_ok */
                break;

        /* ignore shutdown events as we're already in shutdown path */
        case EFC_EVT_SHUTDOWN:
                /* have default shutdown event take precedence */
                node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
                fallthrough;

        case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
        case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
                node_printf(node, "%s received\n", efc_sm_event_name(evt));
                break;

        default:
                __efc_node_common(__func__, ctx, evt, arg);
        }

        if (check_quiesce)
                efc_node_check_els_quiesced(node);
}

void
__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
                          enum efc_sm_event evt, void *arg)
{
        struct efc_node *node = ctx->app;

        efc_node_evt_set(ctx, evt, __func__);

        node_sm_trace();

        switch (evt) {
        case EFC_EVT_ENTER:
                efc_node_hold_frames(node);
                break;

        case EFC_EVT_EXIT:
                efc_node_accept_frames(node);
                break;

        case EFC_EVT_NODE_FREE_OK:
                /* node is officially no longer attached */
                node->attached = false;
                efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
                break;

        case EFC_EVT_ALL_CHILD_NODES_FREE:
        case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
                /* As IOs and ELS IOs complete, we expect to get these events */
                break;

        case EFC_EVT_DOMAIN_ATTACH_OK:
                /* don't care about domain_attach_ok */
                break;

        /* ignore shutdown events as we're already in shutdown path */
        case EFC_EVT_SHUTDOWN:
                /* have default shutdown event take precedence */
                node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
                fallthrough;

        case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
        case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
                node_printf(node, "%s received\n", efc_sm_event_name(evt));
                break;
        default:
                __efc_node_common(__func__, ctx, evt, arg);
        }
}

void
__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
                             enum efc_sm_event evt, void *arg)
{
        struct efc_node *node = ctx->app;
        struct efc *efc = node->efc;

        efc_node_evt_set(ctx, evt, __func__);

        node_sm_trace();

        switch (evt) {
        case EFC_EVT_ENTER:
                efc_node_hold_frames(node);

                /* first check to see if no ELS IOs are outstanding */
                if (efc_els_io_list_empty(node, &node->els_ios_list))
                        /* no ELS IOs outstanding, proceed with node shutdown */
                        efc_node_transition(node, __efc_node_shutdown, NULL);
                break;

        case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
        case EFC_EVT_ALL_CHILD_NODES_FREE:
                if (efc_els_io_list_empty(node, &node->els_ios_list))
                        efc_node_transition(node, __efc_node_shutdown, NULL);
                break;

        case EFC_EVT_EXIT:
                efc_node_accept_frames(node);
                break;

        case EFC_EVT_SRRS_ELS_REQ_FAIL:
                /* Can happen as ELS IOs complete */
                if (WARN_ON(!node->els_req_cnt))
                        break;
                node->els_req_cnt--;
                break;

        /* ignore shutdown events as we're already in shutdown path */
        case EFC_EVT_SHUTDOWN:
                /* have default shutdown event take precedence */
                node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
                fallthrough;

        case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
        case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
                efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
                              efc_sm_event_name(evt));
                break;
        case EFC_EVT_DOMAIN_ATTACH_OK:
                /* don't care about domain_attach_ok */
                break;
        default:
                __efc_node_common(__func__, ctx, evt, arg);
        }
}

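/*
 * Common event handler invoked by the node states for events they do not
 * handle themselves: tracks attach/detach and ELS counters, rejects
 * unexpected ELS frames with LS_RJT, and acknowledges ABTS with BA_ACC.
 */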
void
__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
                  enum efc_sm_event evt, void *arg)
{
        struct efc_node *node = NULL;
        struct efc *efc = NULL;
        struct efc_node_cb *cbdata = arg;

        node = ctx->app;
        efc = node->efc;

        switch (evt) {
        case EFC_EVT_ENTER:
        case EFC_EVT_REENTER:
        case EFC_EVT_EXIT:
        case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
        case EFC_EVT_NODE_MISSING:
        case EFC_EVT_FCP_CMD_RCVD:
                break;

        case EFC_EVT_NODE_REFOUND:
                node->refound = true;
                break;

        /*
         * node->attached must be set appropriately
         * for all node attach/detach events
         */
        case EFC_EVT_NODE_ATTACH_OK:
                node->attached = true;
                break;

        case EFC_EVT_NODE_FREE_OK:
        case EFC_EVT_NODE_ATTACH_FAIL:
                node->attached = false;
                break;

        /*
         * handle any ELS completions that
         * other states either didn't care about
         * or forgot about
         */
        case EFC_EVT_SRRS_ELS_CMPL_OK:
        case EFC_EVT_SRRS_ELS_CMPL_FAIL:
                if (WARN_ON(!node->els_cmpl_cnt))
                        break;
                node->els_cmpl_cnt--;
                break;

        /*
         * handle any ELS request completions that
         * other states either didn't care about
         * or forgot about
         */
        case EFC_EVT_SRRS_ELS_REQ_OK:
        case EFC_EVT_SRRS_ELS_REQ_FAIL:
        case EFC_EVT_SRRS_ELS_REQ_RJT:
        case EFC_EVT_ELS_REQ_ABORTED:
                if (WARN_ON(!node->els_req_cnt))
                        break;
                node->els_req_cnt--;
                break;

        case EFC_EVT_ELS_RCVD: {
                struct fc_frame_header *hdr = cbdata->header->dma.virt;

                /*
                 * Unsupported ELS was received,
                 * send LS_RJT, command not supported
                 */
                efc_log_debug(efc,
                              "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
                              node->display_name, funcname,
                              ((u8 *)cbdata->payload->dma.virt)[0]);

                efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
                                ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
                break;
        }

        case EFC_EVT_PLOGI_RCVD:
        case EFC_EVT_FLOGI_RCVD:
        case EFC_EVT_LOGO_RCVD:
        case EFC_EVT_PRLI_RCVD:
        case EFC_EVT_PRLO_RCVD:
        case EFC_EVT_PDISC_RCVD:
        case EFC_EVT_FDISC_RCVD:
        case EFC_EVT_ADISC_RCVD:
        case EFC_EVT_RSCN_RCVD:
        case EFC_EVT_SCR_RCVD: {
                struct fc_frame_header *hdr = cbdata->header->dma.virt;

                /* sm: / send ELS_RJT */
                efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
                              node->display_name, funcname,
                              efc_sm_event_name(evt));
                /* if we didn't catch this in a state, send generic LS_RJT */
                efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
                                ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
                break;
        }
        case EFC_EVT_ABTS_RCVD: {
                efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
                              node->display_name, funcname,
                              efc_sm_event_name(evt));

                /* sm: / send BA_ACC */
                efc_send_bls_acc(node, cbdata->header->dma.virt);
                break;
        }

        default:
                efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
                              node->display_name, funcname,
                              efc_sm_event_name(evt));
        }
}

void
efc_node_save_sparms(struct efc_node *node, void *payload)
{
        memcpy(node->service_params, payload, sizeof(node->service_params));
}

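/*
 * Post an event to the node state machine. The event depth counter keeps
 * efc_process_node_pending() from recursing, and the node is freed once
 * the outermost event has been handled if a state requested it via
 * node->req_free. Callers in this file (efc_remote_node_cb() and the
 * efc_scsi_*()/efc_node_post_*() wrappers) take efc->lock around this.
 */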
void
efc_node_post_event(struct efc_node *node,
                    enum efc_sm_event evt, void *arg)
{
        bool free_node = false;

        node->evtdepth++;

        efc_sm_post_event(&node->sm, evt, arg);

        /* If our event call depth is one and
         * we're not holding frames
         * then we can dispatch any pending frames.
         * We don't want to allow the efc_process_node_pending()
         * call to recurse.
         */
        if (!node->hold_frames && node->evtdepth == 1)
                efc_process_node_pending(node);

        node->evtdepth--;

        /*
         * Free the node object if so requested,
         * and we're at an event call depth of zero
         */
        if (node->evtdepth == 0 && node->req_free)
                free_node = true;

        if (free_node)
                efc_node_free(node);
}

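/*
 * Move the node state machine to a new state, generating EXIT/ENTER
 * events around the change, or a REENTER event if the state is unchanged.
 */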
void
efc_node_transition(struct efc_node *node,
                    void (*state)(struct efc_sm_ctx *,
                                  enum efc_sm_event, void *), void *data)
{
        struct efc_sm_ctx *ctx = &node->sm;

        if (ctx->current_state == state) {
                efc_node_post_event(node, EFC_EVT_REENTER, data);
        } else {
                efc_node_post_event(node, EFC_EVT_EXIT, data);
                ctx->current_state = state;
                efc_node_post_event(node, EFC_EVT_ENTER, data);
        }
}

void
efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
{
        memset(buf, 0, buf_len);

        snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
}

u64
efc_node_get_wwpn(struct efc_node *node)
{
        struct fc_els_flogi *sp =
                        (struct fc_els_flogi *)node->service_params;

        return be64_to_cpu(sp->fl_wwpn);
}

u64
efc_node_get_wwnn(struct efc_node *node)
{
        struct fc_els_flogi *sp =
                        (struct fc_els_flogi *)node->service_params;

        return be64_to_cpu(sp->fl_wwnn);
}

int
efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
                u8 cmd, void (*efc_node_common_func)(const char *,
                                struct efc_sm_ctx *, enum efc_sm_event, void *),
                const char *funcname)
{
        return 0;
}

int
efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
                u16 cmd, void (*efc_node_common_func)(const char *,
                                struct efc_sm_ctx *, enum efc_sm_event, void *),
                const char *funcname)
{
        return 0;
}

int
efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
{
        int empty;
        unsigned long flags = 0;

        spin_lock_irqsave(&node->els_ios_lock, flags);
        empty = list_empty(list);
        spin_unlock_irqrestore(&node->els_ios_lock, flags);
        return empty;
}

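/*
 * Pause the node state machine: the state to resume is remembered in
 * node->nodedb_state and the node parks in __efc_node_paused until an
 * EFC_EVT_RESUME event is posted. An illustrative sequence (caller_state
 * is a placeholder for any resumable node state):
 *
 *      efc_node_pause(node, caller_state);
 *      ... backend work completes ...
 *      efc_node_post_event(node, EFC_EVT_RESUME, NULL);
 */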
void
efc_node_pause(struct efc_node *node,
               void (*state)(struct efc_sm_ctx *,
                             enum efc_sm_event, void *))
{
        node->nodedb_state = state;
        efc_node_transition(node, __efc_node_paused, NULL);
}

void
__efc_node_paused(struct efc_sm_ctx *ctx,
                  enum efc_sm_event evt, void *arg)
{
        struct efc_node *node = ctx->app;

        efc_node_evt_set(ctx, evt, __func__);

        node_sm_trace();

        /*
         * This state is entered when a state is "paused". When resumed, the
         * node is transitioned to a previously saved state (node->nodedb_state)
         */
        switch (evt) {
        case EFC_EVT_ENTER:
                node_printf(node, "Paused\n");
                break;

        case EFC_EVT_RESUME: {
                void (*pf)(struct efc_sm_ctx *ctx,
                           enum efc_sm_event evt, void *arg);

                pf = node->nodedb_state;

                node->nodedb_state = NULL;
                efc_node_transition(node, pf, NULL);
                break;
        }

        case EFC_EVT_DOMAIN_ATTACH_OK:
                break;

        case EFC_EVT_SHUTDOWN:
                node->req_free = true;
                break;

        default:
                __efc_node_common(__func__, ctx, evt, arg);
        }
}

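/*
 * Map a received ELS frame to a node state-machine event (defaulting to
 * EFC_EVT_ELS_RCVD for unrecognized commands) and post it along with the
 * header/payload sequence in the callback data.
 */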
void
efc_node_recv_els_frame(struct efc_node *node,
                        struct efc_hw_sequence *seq)
{
        u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
        struct {
                u32 cmd;
                enum efc_sm_event evt;
                u32 payload_size;
        } els_cmd_list[] = {
                {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
                {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
                {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
                {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
                {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
                {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
                {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
                {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
                {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
                {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
        };
        struct efc_node_cb cbdata;
        u8 *buf = seq->payload->dma.virt;
        enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
        u32 i;

        memset(&cbdata, 0, sizeof(cbdata));
        cbdata.header = seq->header;
        cbdata.payload = seq->payload;

        /* find a matching event for the ELS command */
        for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
                if (els_cmd_list[i].cmd == buf[0]) {
                        evt = els_cmd_list[i].evt;
                        break;
                }
        }

        efc_node_post_event(node, evt, &cbdata);
}

void
efc_node_recv_ct_frame(struct efc_node *node,
                       struct efc_hw_sequence *seq)
{
        struct fc_ct_hdr *iu = seq->payload->dma.virt;
        struct fc_frame_header *hdr = seq->header->dma.virt;
        struct efc *efc = node->efc;
        u16 gscmd = be16_to_cpu(iu->ct_cmd);

        efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
                    node->display_name, gscmd);
        efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
                        FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
}

void
efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
{
        struct efc_node_cb cbdata;

        memset(&cbdata, 0, sizeof(cbdata));
        cbdata.header = seq->header;
        cbdata.payload = seq->payload;

        efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
}

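/*
 * Dispatch frames that were queued while the node was holding frames.
 * Frames are drained one at a time because dispatching a frame may move
 * the node into a state that holds frames again.
 */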
void
efc_process_node_pending(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_hw_sequence *seq = NULL;
        u32 pend_frames_processed = 0;
        unsigned long flags = 0;

        for (;;) {
                /* need to check for hold frames condition after each frame
                 * processed because any given frame could cause a transition
                 * to a state that holds frames
                 */
                if (node->hold_frames)
                        break;

                seq = NULL;
                /* Get next frame/sequence */
                spin_lock_irqsave(&node->pend_frames_lock, flags);

                if (!list_empty(&node->pend_frames)) {
                        seq = list_first_entry(&node->pend_frames,
                                        struct efc_hw_sequence, list_entry);
                        list_del(&seq->list_entry);
                }
                spin_unlock_irqrestore(&node->pend_frames_lock, flags);

                if (!seq) {
                        pend_frames_processed = node->pend_frames_processed;
                        node->pend_frames_processed = 0;
                        break;
                }
                node->pend_frames_processed++;

                /* now dispatch frame(s) to dispatch function */
                efc_node_dispatch_frame(node, seq);
                efc->tt.hw_seq_free(efc, seq);
        }

        if (pend_frames_processed != 0)
                efc_log_debug(efc, "%u node frames held and processed\n",
                              pend_frames_processed);
}

void
efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
{
        unsigned long flags = 0;
        enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
        struct efc *efc = node->efc;

        if (status)
                evt = EFC_EVT_NODE_SESS_REG_FAIL;

        spin_lock_irqsave(&efc->lock, flags);
        /* Notify the node to resume */
        efc_node_post_event(node, evt, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);
}

void
efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
{
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->lock, flags);
        /* Notify the node to resume */
        efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);
}

void
efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
{
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->lock, flags);
        /* Notify the node to resume */
        efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);
}

void
efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
{
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->lock, flags);
        efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
        spin_unlock_irqrestore(&efc->lock, flags);
}

void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
{
        struct efc *efc = node->efc;
        unsigned long flags = 0;

        spin_lock_irqsave(&efc->lock, flags);
        efc_node_post_event(node, evt, arg);
        spin_unlock_irqrestore(&efc->lock, flags);
}

void efc_node_post_shutdown(struct efc_node *node, void *arg)
{
        unsigned long flags = 0;
        struct efc *efc = node->efc;

        spin_lock_irqsave(&efc->lock, flags);
        efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
        spin_unlock_irqrestore(&efc->lock, flags);
}