/* linux/drivers/net/ethernet/brocade/bna/bna_enet.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Linux network driver for QLogic BR-series Converged Network Adapter.
   4 */
   5/*
   6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   7 * Copyright (c) 2014-2015 QLogic Corporation
   8 * All rights reserved
   9 * www.qlogic.com
  10 */
  11#include "bna.h"
  12
  13static inline int
  14ethport_can_be_up(struct bna_ethport *ethport)
  15{
  16        int ready = 0;
  17        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
  18                ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
  19                         (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
  20                         (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
  21        else
  22                ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
  23                         (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
  24                         !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
  25        return ready;
  26}
  27
/* An ethport is considered "up" exactly when the can-be-up conditions hold. */
#define ethport_is_up ethport_can_be_up
  29
/* Events driving the ethport state machine (FW responses and local requests). */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};
  40
/* Events driving the enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};
  50
/* Events driving the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
  61
/*
 * Copy one hardware stats section from the DMA-able area (hw_stats_kva)
 * to the SW stats area, converting each 64-bit counter from big-endian.
 * Relies on locals 'count', 'i', 'stats_src' and 'stats_dst' declared by
 * the caller (see bna_bfi_stats_get_rsp()).
 */
#define bna_stats_copy(_name, _type)                                    \
do {                                                                    \
        count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);  \
        stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;   \
        stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;        \
        for (i = 0; i < count; i++)                                     \
                stats_dst[i] = be64_to_cpu(stats_src[i]);               \
} while (0)                                                             \

  71/*
  72 * FW response handlers
  73 */
  74
  75static void
  76bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
  77                                struct bfi_msgq_mhdr *msghdr)
  78{
  79        ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
  80
  81        if (ethport_can_be_up(ethport))
  82                bfa_fsm_send_event(ethport, ETHPORT_E_UP);
  83}
  84
  85static void
  86bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
  87                                struct bfi_msgq_mhdr *msghdr)
  88{
  89        int ethport_up = ethport_is_up(ethport);
  90
  91        ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
  92
  93        if (ethport_up)
  94                bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
  95}
  96
  97static void
  98bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
  99                                struct bfi_msgq_mhdr *msghdr)
 100{
 101        struct bfi_enet_enable_req *admin_req =
 102                &ethport->bfi_enet_cmd.admin_req;
 103        struct bfi_enet_rsp *rsp =
 104                container_of(msghdr, struct bfi_enet_rsp, mh);
 105
 106        switch (admin_req->enable) {
 107        case BNA_STATUS_T_ENABLED:
 108                if (rsp->error == BFI_ENET_CMD_OK)
 109                        bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
 110                else {
 111                        ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
 112                        bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
 113                }
 114                break;
 115
 116        case BNA_STATUS_T_DISABLED:
 117                bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
 118                ethport->link_status = BNA_LINK_DOWN;
 119                ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
 120                break;
 121        }
 122}
 123
 124static void
 125bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
 126                                struct bfi_msgq_mhdr *msghdr)
 127{
 128        struct bfi_enet_diag_lb_req *diag_lb_req =
 129                &ethport->bfi_enet_cmd.lpbk_req;
 130        struct bfi_enet_rsp *rsp =
 131                container_of(msghdr, struct bfi_enet_rsp, mh);
 132
 133        switch (diag_lb_req->enable) {
 134        case BNA_STATUS_T_ENABLED:
 135                if (rsp->error == BFI_ENET_CMD_OK)
 136                        bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
 137                else {
 138                        ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
 139                        bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
 140                }
 141                break;
 142
 143        case BNA_STATUS_T_DISABLED:
 144                bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
 145                break;
 146        }
 147}
 148
/* FW acknowledged a pause configuration; forward it to the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
 154
/*
 * FW response carrying the enet attributes (queue/MAC/RIT limits).
 * Stores them into ioceth->attr and advances the ioceth FSM.
 */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* num_txq and num_rxp both derive from the same max_cfg. */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
 177
/*
 * FW response to a stats-get request. Copies all hardware statistics
 * from the DMA area into the SW stats area (big-endian -> CPU order),
 * then clears the busy flag and notifies bnad of completion.
 *
 * The per-Rxf/Txf sections in the DMA area are packed: only functions
 * whose bit is set in the request masks have data, laid out back to back,
 * so stats_src advances only for masked-in functions while the SW-side
 * slots are scattered by function id (and zeroed when absent).
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* NOTE(review): rlb is copied with the rad stats layout — the two
	 * sections appear to share one structure; confirm against bfi_enet.h.
	 */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
 231
 232static void
 233bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
 234                        struct bfi_msgq_mhdr *msghdr)
 235{
 236        ethport->link_status = BNA_LINK_UP;
 237
 238        /* Dispatch events */
 239        ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
 240}
 241
 242static void
 243bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
 244                                struct bfi_msgq_mhdr *msghdr)
 245{
 246        ethport->link_status = BNA_LINK_DOWN;
 247
 248        /* Dispatch events */
 249        ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
 250}
 251
/*
 * Handle an error interrupt: clear a pending halt condition first (if
 * this error is a halt interrupt), then run the IOC error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
 260
 261void
 262bna_mbox_handler(struct bna *bna, u32 intr_status)
 263{
 264        if (BNA_IS_ERR_INTR(bna, intr_status)) {
 265                bna_err_handler(bna, intr_status);
 266                return;
 267        }
 268        if (BNA_IS_MBOX_INTR(bna, intr_status))
 269                bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
 270}
 271
/*
 * Central demultiplexer for FW message-queue responses and async events.
 * Routes each message to the owning object (Rx, Tx, ethport, enet,
 * ioceth or stats) based on its message id; Rx/Tx objects are looked up
 * by the enet id carried in the message header and may be absent (NULL),
 * in which case the response is silently dropped.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* Generic rxf configuration responses share one completion path. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Singleton objects: ethport, enet, ioceth, stats module. */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Async events from FW. */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
 381
 382/* ETHPORT */
 383
/*
 * Invoke and clear the one-shot ethport stop callback, if armed.
 * The callback is cleared before invocation so a re-entrant stop
 * cannot fire it twice.
 */
#define call_ethport_stop_cbfn(_ethport)                                \
do {                                                                    \
        if ((_ethport)->stop_cbfn) {                                    \
                void (*cbfn)(struct bna_enet *);                        \
                cbfn = (_ethport)->stop_cbfn;                           \
                (_ethport)->stop_cbfn = NULL;                           \
                cbfn(&(_ethport)->bna->enet);                           \
        }                                                               \
} while (0)
 393
/*
 * Invoke and clear the one-shot admin-up completion callback, passing
 * the given completion status to bnad.
 */
#define call_ethport_adminup_cbfn(ethport, status)                      \
do {                                                                    \
        if ((ethport)->adminup_cbfn) {                                  \
                void (*cbfn)(struct bnad *, enum bna_cb_status);        \
                cbfn = (ethport)->adminup_cbfn;                         \
                (ethport)->adminup_cbfn = NULL;                         \
                cbfn((ethport)->bna->bnad, status);                     \
        }                                                               \
} while (0)
 403
/* Post a PORT_ADMIN request to FW asking for the port to be enabled. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 420
/*
 * Post a PORT_ADMIN request to FW asking for the port to be disabled.
 * NOTE(review): this reuses BFI_ENET_H2I_PORT_ADMIN_UP_REQ with
 * enable = DISABLED — the up/down commands appear to share one message
 * id, with the direction carried in 'enable'; confirm against bfi_enet.h.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 437
/*
 * Post a DIAG_LOOPBACK request to FW enabling loopback mode.
 * NOTE(review): BNA_ENET_T_LOOPBACK_INTERNAL maps to OPMODE_EXT and
 * everything else to OPMODE_CBL (cable) — the internal/external naming
 * looks inverted at first glance; confirm against the FW diag spec
 * before changing.
 */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 458
/* Post a DIAG_LOOPBACK request to FW disabling loopback mode. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 475
 476static void
 477bna_bfi_ethport_up(struct bna_ethport *ethport)
 478{
 479        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
 480                bna_bfi_ethport_admin_up(ethport);
 481        else
 482                bna_bfi_ethport_lpbk_up(ethport);
 483}
 484
 485static void
 486bna_bfi_ethport_down(struct bna_ethport *ethport)
 487{
 488        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
 489                bna_bfi_ethport_admin_down(ethport);
 490        else
 491                bna_bfi_ethport_lpbk_down(ethport);
 492}
 493
/*
 * Ethport state machine states:
 *  stopped        - not started or failed
 *  down           - started, waiting for conditions to go up
 *  up_resp_wait   - up request posted, awaiting FW response
 *  down_resp_wait - down request posted, awaiting FW response
 *  up             - port is up
 *  last_resp_wait - stopping, draining the final FW response
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
 506
/* Entering stopped: complete any pending stop callback. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
 512
 513static void
 514bna_ethport_sm_stopped(struct bna_ethport *ethport,
 515                        enum bna_ethport_event event)
 516{
 517        switch (event) {
 518        case ETHPORT_E_START:
 519                bfa_fsm_set_state(ethport, bna_ethport_sm_down);
 520                break;
 521
 522        case ETHPORT_E_STOP:
 523                call_ethport_stop_cbfn(ethport);
 524                break;
 525
 526        case ETHPORT_E_FAIL:
 527                /* No-op */
 528                break;
 529
 530        case ETHPORT_E_DOWN:
 531                /* This event is received due to Rx objects failing */
 532                /* No-op */
 533                break;
 534
 535        default:
 536                bfa_sm_fault(event);
 537        }
 538}
 539
/* Entering down: nothing to do. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
 544
 545static void
 546bna_ethport_sm_down(struct bna_ethport *ethport,
 547                        enum bna_ethport_event event)
 548{
 549        switch (event) {
 550        case ETHPORT_E_STOP:
 551                bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
 552                break;
 553
 554        case ETHPORT_E_FAIL:
 555                bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
 556                break;
 557
 558        case ETHPORT_E_UP:
 559                bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
 560                bna_bfi_ethport_up(ethport);
 561                break;
 562
 563        default:
 564                bfa_sm_fault(event);
 565        }
 566}
 567
/* Entering up_resp_wait: the up request was posted by the caller. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
 572
/*
 * Event handler while waiting for FW to acknowledge an up request.
 * The admin-up callback is completed with a status matching the outcome
 * before any state transition.
 */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 611
/* Entering down_resp_wait. */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
 621
/*
 * Event handler while waiting for FW to acknowledge a down request
 * (or to drain a pending up response before the down is posted).
 */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 653
/* Entering up: nothing to do. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
 658
/* Event handler for the up state. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		/* Move to drain state first, then post the down request. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 682
/* Entering last_resp_wait: nothing to do. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
 687
/*
 * Event handler while stopping: drain the final FW response before
 * settling in the stopped state.
 */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 719
 720static void
 721bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
 722{
 723        ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
 724        ethport->bna = bna;
 725
 726        ethport->link_status = BNA_LINK_DOWN;
 727        ethport->link_cbfn = bnad_cb_ethport_link_status;
 728
 729        ethport->rx_started_count = 0;
 730
 731        ethport->stop_cbfn = NULL;
 732        ethport->adminup_cbfn = NULL;
 733
 734        bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
 735}
 736
 737static void
 738bna_ethport_uninit(struct bna_ethport *ethport)
 739{
 740        ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
 741        ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
 742
 743        ethport->bna = NULL;
 744}
 745
/* Kick the ethport FSM out of stopped. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
 751
/* Ethport stop completion: drop one count off the enet child-stop wc. */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
 757
/* Arm the stop callback and request the ethport FSM to stop. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
 764
/*
 * Handle an IOC failure: restore the port-enabled default, force the
 * link state down (notifying bnad if it changes) and fail the FSM.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
 777
 778/* Should be called only when ethport is disabled */
 779void
 780bna_ethport_cb_rx_started(struct bna_ethport *ethport)
 781{
 782        ethport->rx_started_count++;
 783
 784        if (ethport->rx_started_count == 1) {
 785                ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
 786
 787                if (ethport_can_be_up(ethport))
 788                        bfa_fsm_send_event(ethport, ETHPORT_E_UP);
 789        }
 790}
 791
 792void
 793bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
 794{
 795        int ethport_up = ethport_is_up(ethport);
 796
 797        ethport->rx_started_count--;
 798
 799        if (ethport->rx_started_count == 0) {
 800                ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
 801
 802                if (ethport_up)
 803                        bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
 804        }
 805}
 806
 807/* ENET */
 808
/*
 * Start all enet children (ethport, Tx module, Rx module), mapping the
 * enet type to the matching Tx/Rx types (regular vs loopback).
 */
#define bna_enet_chld_start(enet)                                       \
do {                                                                    \
        enum bna_tx_type tx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bna_ethport_start(&(enet)->bna->ethport);                       \
        bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);                \
        bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
} while (0)
 821
/*
 * Stop all enet children, tracking their completions with a wait
 * counter: one wc count is taken per child before its stop is issued,
 * and bna_enet_cb_chld_stopped() runs once all of them report back.
 */
#define bna_enet_chld_stop(enet)                                        \
do {                                                                    \
        enum bna_tx_type tx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
        bfa_wc_up(&(enet)->chld_stop_wc);                               \
        bna_ethport_stop(&(enet)->bna->ethport);                        \
        bfa_wc_up(&(enet)->chld_stop_wc);                               \
        bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);                 \
        bfa_wc_up(&(enet)->chld_stop_wc);                               \
        bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
        bfa_wc_wait(&(enet)->chld_stop_wc);                             \
} while (0)
 839
/* Propagate a failure to all enet children. */
#define bna_enet_chld_fail(enet)                                        \
do {                                                                    \
        bna_ethport_fail(&(enet)->bna->ethport);                        \
        bna_tx_mod_fail(&(enet)->bna->tx_mod);                          \
        bna_rx_mod_fail(&(enet)->bna->rx_mod);                          \
} while (0)
 846
/* (Re)start only the Rx module, using the Rx type matching the enet
 * type (regular vs loopback).  Used after an MTU-triggered Rx stop.
 */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
 854
/* Stop only the Rx module under the child-stop wait counter; completion
 * surfaces as ENET_E_CHLD_STOPPED via bna_enet_cb_chld_stopped().
 */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
 865
/* Invoke and clear the pending stop callback, if any.  The callback
 * fields are cleared *before* the call so a callback that re-arms a
 * stop does not get wiped out.
 */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
 878
/* Invoke and clear the pending MTU-set completion callback, if any.
 * Cleared before the call for the same re-arm reason as the stop cbfn.
 */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
 888
 889static void bna_enet_cb_chld_stopped(void *arg);
 890static void bna_bfi_pause_set(struct bna_enet *enet);
 891
 892bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
 893                        enum bna_enet_event);
 894bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
 895                        enum bna_enet_event);
 896bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
 897                        enum bna_enet_event);
 898bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
 899                        enum bna_enet_event);
 900bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
 901                        enum bna_enet_event);
 902bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
 903                        enum bna_enet_event);
 904bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
 905                        enum bna_enet_event);
 906
/* Entry action for the "stopped" state: flush any pending MTU-set and
 * stop completion callbacks (MTU ack first, then the stop ack).
 */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
 913
 914static void
 915bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
 916{
 917        switch (event) {
 918        case ENET_E_START:
 919                bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
 920                break;
 921
 922        case ENET_E_STOP:
 923                call_enet_stop_cbfn(enet);
 924                break;
 925
 926        case ENET_E_FAIL:
 927                /* No-op */
 928                break;
 929
 930        case ENET_E_PAUSE_CFG:
 931                break;
 932
 933        case ENET_E_MTU_CFG:
 934                call_enet_mtu_cbfn(enet);
 935                break;
 936
 937        case ENET_E_CHLD_STOPPED:
 938                /**
 939                 * This event is received due to Ethport, Tx and Rx objects
 940                 * failing
 941                 */
 942                /* No-op */
 943                break;
 944
 945        default:
 946                bfa_sm_fault(event);
 947        }
 948}
 949
/* Entry action: issue the initial pause configuration to firmware;
 * the response drives the FSM out of this state.
 */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
 955
 956static void
 957bna_enet_sm_pause_init_wait(struct bna_enet *enet,
 958                                enum bna_enet_event event)
 959{
 960        switch (event) {
 961        case ENET_E_STOP:
 962                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
 963                bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
 964                break;
 965
 966        case ENET_E_FAIL:
 967                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
 968                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
 969                break;
 970
 971        case ENET_E_PAUSE_CFG:
 972                enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
 973                break;
 974
 975        case ENET_E_MTU_CFG:
 976                /* No-op */
 977                break;
 978
 979        case ENET_E_FWRESP_PAUSE:
 980                if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
 981                        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
 982                        bna_bfi_pause_set(enet);
 983                } else {
 984                        bfa_fsm_set_state(enet, bna_enet_sm_started);
 985                        bna_enet_chld_start(enet);
 986                }
 987                break;
 988
 989        default:
 990                bfa_sm_fault(event);
 991        }
 992}
 993
/* Entry action: a stop is in progress; drop any queued pause change so
 * the pending firmware response is the last one we wait for.
 */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
 999
/* Waiting for the final outstanding firmware response before the enet
 * can be declared stopped.  Either the response or an IOC failure
 * completes the stop.
 */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1014
/* Entry action for the running state: acknowledge any pending MTU-set.
 */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_mtu_cbfn(enet);
}
1024
/* Event handler for the running state.  Pause reconfig goes straight to
 * firmware; MTU reconfig first stops Rx (the new MTU takes effect when
 * Rx is restarted from cfg_wait).
 */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1053
/* No entry action: the triggering event handler already kicked off the
 * pause request or Rx stop before transitioning here.
 */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1058
/* Waiting for an in-flight reconfiguration (pause-set response or Rx
 * stop) to finish.  Further PAUSE/MTU requests arriving meanwhile are
 * latched in flags and replayed one at a time on completion; only when
 * both flags are clear do we return to started.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Queue behind the outstanding operation. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* Queue behind the outstanding operation. */
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		/* Rx stop for the MTU change finished; restart Rx with
		 * the new MTU, then fall through to replay latched cfg. */
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1104
1105static void
1106bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1107{
1108        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1109        enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1110}
1111
/* A stop is pending while a reconfiguration is still in flight; wait
 * for that operation to finish (or the IOC to fail) before stopping
 * the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1131
/* Entry action: stop ethport/Tx/Rx; ENET_E_CHLD_STOPPED fires when all
 * of them have stopped.
 */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
1137
1138static void
1139bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1140                                enum bna_enet_event event)
1141{
1142        switch (event) {
1143        case ENET_E_FAIL:
1144                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1145                bna_enet_chld_fail(enet);
1146                break;
1147
1148        case ENET_E_CHLD_STOPPED:
1149                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1150                break;
1151
1152        default:
1153                bfa_sm_fault(event);
1154        }
1155}
1156
/* Build and post a BFI_ENET_H2I_SET_PAUSE_REQ message carrying the
 * current pause configuration to firmware via the message queue.  The
 * response arrives as ENET_E_FWRESP_PAUSE.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	/* num_entries is wire-format big-endian. */
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1173
1174static void
1175bna_enet_cb_chld_stopped(void *arg)
1176{
1177        struct bna_enet *enet = (struct bna_enet *)arg;
1178
1179        bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1180}
1181
1182static void
1183bna_enet_init(struct bna_enet *enet, struct bna *bna)
1184{
1185        enet->bna = bna;
1186        enet->flags = 0;
1187        enet->mtu = 0;
1188        enet->type = BNA_ENET_T_REGULAR;
1189
1190        enet->stop_cbfn = NULL;
1191        enet->stop_cbarg = NULL;
1192
1193        enet->mtu_cbfn = NULL;
1194
1195        bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1196}
1197
1198static void
1199bna_enet_uninit(struct bna_enet *enet)
1200{
1201        enet->flags = 0;
1202
1203        enet->bna = NULL;
1204}
1205
1206static void
1207bna_enet_start(struct bna_enet *enet)
1208{
1209        enet->flags |= BNA_ENET_F_IOCETH_READY;
1210        if (enet->flags & BNA_ENET_F_ENABLED)
1211                bfa_fsm_send_event(enet, ENET_E_START);
1212}
1213
1214static void
1215bna_ioceth_cb_enet_stopped(void *arg)
1216{
1217        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1218
1219        bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1220}
1221
/* Stop the enet on behalf of ioceth; completion is reported back via
 * bna_ioceth_cb_enet_stopped().  The callback must be armed before the
 * STOP event, since the FSM may complete synchronously.
 */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1231
/* IOC failure path: mark the IOC not ready and fail the enet FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1238
/* Tx module stop-complete: release one reference on the child-stop
 * wait counter armed by bna_enet_chld_stop().
 */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1244
/* Rx module stop-complete: release one reference on the child-stop
 * wait counter armed by bna_enet_chld_stop() / bna_enet_rx_stop().
 */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1250
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1256
1257void
1258bna_enet_enable(struct bna_enet *enet)
1259{
1260        if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1261                return;
1262
1263        enet->flags |= BNA_ENET_F_ENABLED;
1264
1265        if (enet->flags & BNA_ENET_F_IOCETH_READY)
1266                bfa_fsm_send_event(enet, ENET_E_START);
1267}
1268
/* Administratively disable the enet.  A soft cleanup just acks the
 * caller without touching hardware; a hard cleanup arms the stop
 * callback and drives the FSM through a full stop.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1285
/* Record a new pause configuration and let the FSM decide when to push
 * it to firmware (immediately, queued, or dropped if stopping).
 */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config)
{
	enet->pause_config = *pause_config;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
1294
/* Record a new MTU and arm the completion callback; the FSM acks via
 * call_enet_mtu_cbfn() once the change has taken effect (which may be
 * immediate when stopped).
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1305
/* Fetch the adapter's factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
1311
1312/* IOCETH */
1313
/* Enable mailbox interrupts at both the driver (bnad) and hardware
 * level.  NOTE(review): intr_status is read but not used -- presumably
 * the read itself acks pending status; confirm against
 * bna_intr_status_get().
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)
1321
/* Disable mailbox interrupts: hardware first, then the driver side --
 * the reverse of enable_mbox_intr().
 */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)
1327
/* Invoke and clear the pending ioceth stop callback, if any; fields are
 * cleared before the call so the callback can safely re-arm a stop.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
1340
/* Stats module needs no teardown; kept as a macro for symmetry with the
 * other module lifecycle hooks.
 */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)
1344
/* Mark the IOC ready so stats requests may be issued. */
#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)
1349
/* Block further stats requests while the IOC goes down. */
#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)
1354
/* IOC failure: block new requests and clear any in-flight get/clear
 * busy flags, since their firmware responses will never arrive.
 */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1361
1362static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1363
1364bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1365                        enum bna_ioceth_event);
1366bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1367                        enum bna_ioceth_event);
1368bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1369                        enum bna_ioceth_event);
1370bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1371                        enum bna_ioceth_event);
1372bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1373                        enum bna_ioceth_event);
1374bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1375                        enum bna_ioceth_event);
1376bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1377                        enum bna_ioceth_event);
1378bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1379                        enum bna_ioceth_event);
1380
/* Entry action for the ioceth "stopped" state: ack any pending stop. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1386
/* Event handler for the ioceth "stopped" state. */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Self-transition; re-entering stopped re-runs the entry
		 * action, which acks any pending stop callback. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1414
/* Intentionally empty entry action; see the note below. */
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1423
/* Waiting for the IOC to finish initializing after bfa_nw_ioc_enable().
 */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC restarted; re-enable mailbox interrupts. */
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1451
/* Entry action: query enet attributes from firmware; the response
 * arrives as IOCETH_E_ENET_ATTR_RESP.
 */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1457
/* Waiting for the firmware attribute-get response. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		/* Must let the outstanding request drain first. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1480
/* Entry action for the ready state: bring up the enet and the stats
 * module, then notify the driver layer that the IOC/eth is usable.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1488
/* Event handler for the operational state. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Fail the children before leaving the state. */
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1508
/* No entry action: just waiting for the outstanding attribute response
 * (or an IOC failure) before disabling the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1513
/* Disable requested while an attribute-get was in flight; wait for the
 * response (or IOC failure) and then start the IOC disable.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		/* Mbox interrupts are dropped here; on the normal path
		 * below they stay on until IOCETH_E_IOC_DISABLED. */
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1534
/* Entry action: block stats traffic, then stop the enet; completion is
 * reported back as IOCETH_E_ENET_STOPPED.
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1541
/* Waiting for the enet to finish stopping before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		/* IOC died mid-stop: fail children and disable anyway. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1564
/* No entry action: bfa_nw_ioc_disable() was already issued by the
 * transition that brought us here.
 */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1569
/* Waiting for the IOC disable to complete. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1589
/* Entry action for the failed state: tell the driver layer the
 * IOC/eth has failed.
 */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1595
/* Event handler for the failed state: waiting either for an IOC reset
 * (recovery) or an administrative disable.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC recovered; resume the normal bring-up sequence. */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Already failed; ignore repeats. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1618
/* Build and post a BFI_ENET_H2I_GET_ATTR_REQ to firmware; the reply is
 * delivered to the FSM as IOCETH_E_ENET_ATTR_RESP.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	/* num_entries is wire-format big-endian. */
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1632
1633/* IOC callback functions */
1634
1635static void
1636bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1637{
1638        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1639
1640        if (error)
1641                bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1642        else
1643                bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1644}
1645
1646static void
1647bna_cb_ioceth_disable(void *arg)
1648{
1649        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1650
1651        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1652}
1653
1654static void
1655bna_cb_ioceth_hbfail(void *arg)
1656{
1657        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658
1659        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1660}
1661
1662static void
1663bna_cb_ioceth_reset(void *arg)
1664{
1665        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1666
1667        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1668}
1669
/* IOC-layer callback table registered via bfa_nw_ioc_attach(). */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	.enable_cbfn = bna_cb_ioceth_enable,
	.disable_cbfn = bna_cb_ioceth_disable,
	.hbfail_cbfn = bna_cb_ioceth_hbfail,
	.reset_cbfn = bna_cb_ioceth_reset
};
1676
/* Seed enet attributes with compile-time defaults; real values replace
 * them once the firmware attribute query completes
 * (fw_query_complete stays false until then).
 */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
1686
/* One-time ioceth setup: attach the IOC, claim its DMA/kernel memory,
 * attach the common sub-modules (CEE, flash, msgq) carving their
 * portions out of one shared DMA region, and park the FSM in stopped.
 * The kva/dma cursors advance in lockstep, so the claim order below
 * must not change.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	/* Route BFI_MC_ENET responses into the bna message handler. */
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	/* Enter the FSM last, once everything is wired up. */
	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1742
/* Detach the IOC and drop the back-pointer; counterpart of
 * bna_ioceth_init().
 */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1750
/* Enable the ioceth.  If it is already operational, just re-notify the
 * driver; only kick the FSM if it is currently stopped (intermediate
 * states are left alone).
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1762
/* Disable the ioceth.  A soft cleanup just acks the driver; a hard
 * cleanup arms the stop callback and drives a full FSM shutdown.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1776
1777static void
1778bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1779                  struct bna_res_info *res_info)
1780{
1781        int i;
1782
1783        ucam_mod->ucmac = (struct bna_mac *)
1784        res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1785
1786        INIT_LIST_HEAD(&ucam_mod->free_q);
1787        for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
1788                list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1789
1790        /* A separate queue to allow synchronous setting of a list of MACs */
1791        INIT_LIST_HEAD(&ucam_mod->del_q);
1792        for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
1793                list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1794
1795        ucam_mod->bna = bna;
1796}
1797
/* Tear down the unicast CAM module; only the bna back-pointer is cleared
 * (the MAC array memory is owned and freed by the resource framework).
 */
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	ucam_mod->bna = NULL;
}
1803
1804static void
1805bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1806                  struct bna_res_info *res_info)
1807{
1808        int i;
1809
1810        mcam_mod->mcmac = (struct bna_mac *)
1811        res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1812
1813        INIT_LIST_HEAD(&mcam_mod->free_q);
1814        for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
1815                list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1816
1817        mcam_mod->mchandle = (struct bna_mcam_handle *)
1818        res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1819
1820        INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1821        for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
1822                list_add_tail(&mcam_mod->mchandle[i].qe,
1823                              &mcam_mod->free_handle_q);
1824
1825        /* A separate queue to allow synchronous setting of a list of MACs */
1826        INIT_LIST_HEAD(&mcam_mod->del_q);
1827        for (; i < (bna->ioceth.attr.num_mcmac * 2); i++)
1828                list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1829
1830        mcam_mod->bna = bna;
1831}
1832
/* Tear down the multicast CAM module; only the bna back-pointer is
 * cleared (array memory is owned and freed by the resource framework).
 */
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	mcam_mod->bna = NULL;
}
1838
/* Build and post a BFI_ENET_H2I_STATS_GET_REQ message asking the firmware
 * to DMA all statistics for the active Tx/Rx functions into the host
 * buffer whose DMA address was recorded in bna_init().
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	/* Mark the request in flight; bna_hw_stats_get() rejects overlap */
	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	/* All stat groups, for every Tx and Rx function currently in use */
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	/* DMA destination: the host-side hardware stats buffer */
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1860
/* Fill in the base resource requirements (memory type, count and length)
 * that the driver must allocate before bna_init() can run.
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1894
/* Fill in per-module resource requirements, sized from the attributes the
 * firmware reported (bna->ioceth.attr). Must run after the IOC attribute
 * query so the num_txq/num_rxp/num_ucmac/num_mcmac counts are valid.
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module.
	 * Two RxQs per Rx path.
	 */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module.
	 * Doubled to cover both the free and deletion queues.
	 */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module.
	 * Doubled to cover both the free and deletion queues.
	 */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1964
/* First-stage BNA initialization: record driver/PCI context, claim the
 * hardware-stats DMA buffer, map registers and bring up the ioceth, enet
 * and ethport state machines. Per-module init (Tx/Rx/CAM) happens later
 * in bna_mod_init(), once firmware attributes are known.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* Hardware stats buffer: KVA for the host, split DMA address for
	 * the firmware request built in bna_bfi_stats_get().
	 */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
1987
/* Second-stage BNA initialization: set up the Tx, Rx, unicast-CAM and
 * multicast-CAM modules from the resources sized by bna_mod_res_req(),
 * and mark module init done so bna_uninit() knows to tear them down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	/* No Rx function owns default mode or promiscuous mode yet */
	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2004
/* Tear down the BNA in reverse order of initialization. The per-module
 * teardown is guarded by BNA_MOD_F_INIT_DONE, so this is safe to call
 * even when bna_mod_init() never ran.
 */
void
bna_uninit(struct bna *bna)
{
	/* Modules, in reverse order of bna_mod_init() */
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2024
2025int
2026bna_num_txq_set(struct bna *bna, int num_txq)
2027{
2028        if (bna->ioceth.attr.fw_query_complete &&
2029                (num_txq <= bna->ioceth.attr.num_txq)) {
2030                bna->ioceth.attr.num_txq = num_txq;
2031                return BNA_CB_SUCCESS;
2032        }
2033
2034        return BNA_CB_FAIL;
2035}
2036
2037int
2038bna_num_rxp_set(struct bna *bna, int num_rxp)
2039{
2040        if (bna->ioceth.attr.fw_query_complete &&
2041                (num_rxp <= bna->ioceth.attr.num_rxp)) {
2042                bna->ioceth.attr.num_rxp = num_rxp;
2043                return BNA_CB_SUCCESS;
2044        }
2045
2046        return BNA_CB_FAIL;
2047}
2048
2049struct bna_mac *
2050bna_cam_mod_mac_get(struct list_head *head)
2051{
2052        struct bna_mac *mac;
2053
2054        mac = list_first_entry_or_null(head, struct bna_mac, qe);
2055        if (mac)
2056                list_del(&mac->qe);
2057
2058        return mac;
2059}
2060
2061struct bna_mcam_handle *
2062bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2063{
2064        struct bna_mcam_handle *handle;
2065
2066        handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
2067                                          struct bna_mcam_handle, qe);
2068        if (handle)
2069                list_del(&handle->qe);
2070
2071        return handle;
2072}
2073
/* Return a multicast handle to the module's free-handle queue.
 * Counterpart of bna_mcam_mod_handle_get().
 */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2080
2081void
2082bna_hw_stats_get(struct bna *bna)
2083{
2084        if (!bna->stats_mod.ioc_ready) {
2085                bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2086                return;
2087        }
2088        if (bna->stats_mod.stats_get_busy) {
2089                bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2090                return;
2091        }
2092
2093        bna_bfi_stats_get(bna);
2094}
2095