/* linux/drivers/net/ethernet/brocade/bna/bna_enet.c */
   1/*
   2 * Linux network driver for QLogic BR-series Converged Network Adapter.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License (GPL) Version 2 as
   6 * published by the Free Software Foundation
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 */
  13/*
  14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  15 * Copyright (c) 2014-2015 QLogic Corporation
  16 * All rights reserved
  17 * www.qlogic.com
  18 */
  19#include "bna.h"
  20
  21static inline int
  22ethport_can_be_up(struct bna_ethport *ethport)
  23{
  24        int ready = 0;
  25        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
  26                ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
  27                         (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
  28                         (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
  29        else
  30                ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
  31                         (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
  32                         !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
  33        return ready;
  34}
  35
/*
 * An ethport that was already driven up satisfies exactly the conditions
 * checked by ethport_can_be_up(), so the same predicate serves both uses.
 */
#define ethport_is_up ethport_can_be_up
  37
/* Events posted to the ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,	/* FW acked port up */
	ETHPORT_E_FWRESP_DOWN		= 7,	/* FW acked port down */
	ETHPORT_E_FWRESP_UP_FAIL	= 8,	/* FW rejected port up */
};
  48
/* Events posted to the enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,	/* FW acked pause config */
	ENET_E_CHLD_STOPPED		= 7,	/* all child objects stopped */
};
  58
/* Events posted to the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,	/* FW attribute query done */
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
  69
  70#define bna_stats_copy(_name, _type)                                    \
  71do {                                                                    \
  72        count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);  \
  73        stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;   \
  74        stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;        \
  75        for (i = 0; i < count; i++)                                     \
  76                stats_dst[i] = be64_to_cpu(stats_src[i]);               \
  77} while (0)                                                             \
  78
  79/*
  80 * FW response handlers
  81 */
  82
  83static void
  84bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
  85                                struct bfi_msgq_mhdr *msghdr)
  86{
  87        ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
  88
  89        if (ethport_can_be_up(ethport))
  90                bfa_fsm_send_event(ethport, ETHPORT_E_UP);
  91}
  92
  93static void
  94bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
  95                                struct bfi_msgq_mhdr *msghdr)
  96{
  97        int ethport_up = ethport_is_up(ethport);
  98
  99        ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
 100
 101        if (ethport_up)
 102                bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
 103}
 104
/*
 * FW response to a port admin up/down request.  The originating request is
 * re-read from bfi_enet_cmd.admin_req to tell whether this answers an UP
 * or a DOWN command.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* FW refused to bring the port up; reflect that in
			 * the flags before driving the FSM. */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		/* Port is down: deliver the FSM event first, then force the
		 * link state down and notify the driver layer. */
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
 131
/*
 * FW response to a diagnostic loopback enable/disable request.  Mirrors
 * bna_bfi_ethport_admin_rsp() but for loopback mode; note the failure
 * path clears ADMIN_UP (not PORT_ENABLED) here.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
 156
/* FW acknowledged a pause configuration request; advance the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
 162
/* FW response carrying the adapter's enet attributes (resource limits). */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* max_cfg bounds both TXQs and RXPs */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
 185
/*
 * FW response to a stats-get request: byte-swap all hardware counters from
 * the DMA area into bna->stats.hw_stats, then hand the result to the driver.
 * The bna_stats_copy() macro uses the locals declared below (stats_src,
 * stats_dst, count, i) and advances nothing; the rxf/txf loops that follow
 * consume stats_src sequentially, scattering only the enets selected by the
 * request masks and zero-filling the rest.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);	/* rlb shares the rad stats layout */
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Allow the next stats request, then report success upward. */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
 239
 240static void
 241bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
 242                        struct bfi_msgq_mhdr *msghdr)
 243{
 244        ethport->link_status = BNA_LINK_UP;
 245
 246        /* Dispatch events */
 247        ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
 248}
 249
 250static void
 251bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
 252                                struct bfi_msgq_mhdr *msghdr)
 253{
 254        ethport->link_status = BNA_LINK_DOWN;
 255
 256        /* Dispatch events */
 257        ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
 258}
 259
/*
 * Hardware error interrupt: clear a halt condition if present, then let
 * the IOC layer run its error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
 268
 269void
 270bna_mbox_handler(struct bna *bna, u32 intr_status)
 271{
 272        if (BNA_IS_ERR_INTR(bna, intr_status)) {
 273                bna_err_handler(bna, intr_status);
 274                return;
 275        }
 276        if (BNA_IS_MBOX_INTR(bna, intr_status))
 277                bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
 278}
 279
/*
 * Demultiplex firmware responses and async events arriving on the message
 * queue, routing each to the owning object's handler.  For per-Rx/Tx
 * messages the target object is looked up by resource id; a NULL result
 * (object already torn down) silently drops the message.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);	/* sets rx */
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All generic rxf configuration acks share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);	/* sets tx */
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Singleton objects: ethport, enet, ioceth, stats. */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Unsolicited async events from firmware. */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
 389
 390/* ETHPORT */
 391
/*
 * Invoke and clear the one-shot ethport stop callback, if armed.
 * Clearing before the call prevents re-entry from firing it twice.
 */
#define call_ethport_stop_cbfn(_ethport)                                \
do {                                                                    \
	if ((_ethport)->stop_cbfn) {                                    \
		void (*cbfn)(struct bna_enet *);                        \
		cbfn = (_ethport)->stop_cbfn;                           \
		(_ethport)->stop_cbfn = NULL;                           \
		cbfn(&(_ethport)->bna->enet);                           \
	}                                                               \
} while (0)
 401
/*
 * Invoke and clear the one-shot admin-up completion callback, if armed,
 * passing the final status of the admin-up attempt.
 */
#define call_ethport_adminup_cbfn(ethport, status)                      \
do {                                                                    \
	if ((ethport)->adminup_cbfn) {                                  \
		void (*cbfn)(struct bnad *, enum bna_cb_status);        \
		cbfn = (ethport)->adminup_cbfn;                         \
		(ethport)->adminup_cbfn = NULL;                         \
		cbfn((ethport)->bna->bnad, status);                     \
	}                                                               \
} while (0)
 411
/* Post a PORT_ADMIN_UP request (enable) to firmware via the message queue. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 428
/*
 * Post a port admin *down* request to firmware.  NOTE(review): this reuses
 * the BFI_ENET_H2I_PORT_ADMIN_UP_REQ opcode with enable=DISABLED —
 * presumably a single admin opcode whose enable field selects direction;
 * confirm against the BFI message definitions.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 445
/*
 * Post a diagnostic loopback *enable* request to firmware.
 * NOTE(review): internal loopback maps to OPMODE_EXT and everything else
 * to OPMODE_CBL — the naming looks inverted; verify against the BFI
 * loopback opmode definitions before relying on it.
 */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 466
/* Post a diagnostic loopback *disable* request to firmware. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
 483
 484static void
 485bna_bfi_ethport_up(struct bna_ethport *ethport)
 486{
 487        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
 488                bna_bfi_ethport_admin_up(ethport);
 489        else
 490                bna_bfi_ethport_lpbk_up(ethport);
 491}
 492
 493static void
 494bna_bfi_ethport_down(struct bna_ethport *ethport)
 495{
 496        if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
 497                bna_bfi_ethport_admin_down(ethport);
 498        else
 499                bna_bfi_ethport_lpbk_down(ethport);
 500}
 501
/* Ethport FSM states: stopped -> down -> up_resp_wait/down_resp_wait -> up,
 * with last_resp_wait draining an outstanding FW response during stop. */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
 514
/* Entering stopped: complete any pending stop request. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
 520
/* Stopped state: only START makes progress; STOP re-acks immediately. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped; just fire the stop callback again. */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
 547
/* No action on entering the down state. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
 552
/* Down state: UP kicks off a FW port-up request; STOP/FAIL go to stopped. */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Transition first, then post the FW up request. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 575
/* No action on entering up_resp_wait (FW up request already posted). */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
 580
/*
 * Waiting for FW to ack a port-up request.  Each exit path resolves the
 * pending admin-up callback with the appropriate status.
 */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/* Up was interrupted by a down request before FW answered. */
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 619
/* Entering down_resp_wait: intentionally posts nothing to firmware. */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
 629
/*
 * Waiting for FW to ack a port-down request (or the superseded up request
 * that preceded it — see the FWRESP_UP_OK case).
 */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 661
/* No action on entering the up state. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
 666
/* Up state: STOP/DOWN post a FW down request before leaving. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 690
/* No action on entering last_resp_wait. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
 695
/*
 * Stopping, but one FW response is still outstanding; drain it before
 * moving to stopped.
 */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
 727
 728static void
 729bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
 730{
 731        ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
 732        ethport->bna = bna;
 733
 734        ethport->link_status = BNA_LINK_DOWN;
 735        ethport->link_cbfn = bnad_cb_ethport_link_status;
 736
 737        ethport->rx_started_count = 0;
 738
 739        ethport->stop_cbfn = NULL;
 740        ethport->adminup_cbfn = NULL;
 741
 742        bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
 743}
 744
 745static void
 746bna_ethport_uninit(struct bna_ethport *ethport)
 747{
 748        ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
 749        ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
 750
 751        ethport->bna = NULL;
 752}
 753
/* Kick the ethport FSM out of stopped. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
 759
/* Ethport-stopped callback: release one count on the enet's child-stop
 * wait counter (armed by bna_enet_chld_stop()). */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
 765
/* Arm the stop-completion callback, then drive the FSM toward stopped. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
 772
/* IOC failure path: restore defaults, report link down, fail the FSM. */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
 785
/* Should be called only when ethport is disabled */
/* An Rx object started; on the first one, mark RX_STARTED and, if all other
 * conditions hold, drive the port up. */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}
 799
/* An Rx object stopped; when the last one goes, clear RX_STARTED and, if
 * the port was up beforehand, drive it down. */
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	/* Sample "up" before the flag clear below changes the predicate. */
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
 814
 815/* ENET */
 816
/*
 * Start all enet children (ethport, Tx mod, Rx mod), translating the enet
 * type into the matching Tx/Rx types.
 */
#define bna_enet_chld_start(enet)                                       \
do {                                                                    \
	enum bna_tx_type tx_type =                                      \
		((enet)->type == BNA_ENET_T_REGULAR) ?                  \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
	enum bna_rx_type rx_type =                                      \
		((enet)->type == BNA_ENET_T_REGULAR) ?                  \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
	bna_ethport_start(&(enet)->bna->ethport);                       \
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);                \
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
} while (0)
 829
/*
 * Stop all enet children, tracking completion with the chld_stop wait
 * counter: one wc_up per child before its stop call, and a final wc_wait
 * that fires bna_enet_cb_chld_stopped() once every child reports stopped.
 */
#define bna_enet_chld_stop(enet)                                        \
do {                                                                    \
	enum bna_tx_type tx_type =                                      \
		((enet)->type == BNA_ENET_T_REGULAR) ?                  \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;                   \
	enum bna_rx_type rx_type =                                      \
		((enet)->type == BNA_ENET_T_REGULAR) ?                  \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);                               \
	bna_ethport_stop(&(enet)->bna->ethport);                        \
	bfa_wc_up(&(enet)->chld_stop_wc);                               \
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);                 \
	bfa_wc_up(&(enet)->chld_stop_wc);                               \
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
	bfa_wc_wait(&(enet)->chld_stop_wc);                             \
} while (0)
 847
/* Propagate a failure to all enet children; no completion wait needed. */
#define bna_enet_chld_fail(enet)                                        \
do {                                                                    \
        bna_ethport_fail(&(enet)->bna->ethport);                        \
        bna_tx_mod_fail(&(enet)->bna->tx_mod);                          \
        bna_rx_mod_fail(&(enet)->bna->rx_mod);                          \
} while (0)
 854
/* Restart only the Rx module (used after an MTU-change Rx stop). */
#define bna_enet_rx_start(enet)                                         \
do {                                                                    \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);                \
} while (0)
 862
/*
 * Stop only the Rx module, reusing chld_stop_wc so completion is again
 * reported via bna_enet_cb_chld_stopped() (ENET_E_CHLD_STOPPED).
 */
#define bna_enet_rx_stop(enet)                                          \
do {                                                                    \
        enum bna_rx_type rx_type =                                      \
                ((enet)->type == BNA_ENET_T_REGULAR) ?                  \
                BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;                   \
        bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
        bfa_wc_up(&(enet)->chld_stop_wc);                               \
        bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);                 \
        bfa_wc_wait(&(enet)->chld_stop_wc);                             \
} while (0)
 873
/*
 * Invoke and clear the one-shot stop-complete callback. The callback is
 * cleared before being called so a re-arm from within it is preserved.
 */
#define call_enet_stop_cbfn(enet)                                       \
do {                                                                    \
        if ((enet)->stop_cbfn) {                                        \
                void (*cbfn)(void *);                                   \
                void *cbarg;                                            \
                cbfn = (enet)->stop_cbfn;                               \
                cbarg = (enet)->stop_cbarg;                             \
                (enet)->stop_cbfn = NULL;                               \
                (enet)->stop_cbarg = NULL;                              \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)
 886
/* Invoke and clear the one-shot MTU-set completion callback. */
#define call_enet_mtu_cbfn(enet)                                        \
do {                                                                    \
        if ((enet)->mtu_cbfn) {                                         \
                void (*cbfn)(struct bnad *);                            \
                cbfn = (enet)->mtu_cbfn;                                \
                (enet)->mtu_cbfn = NULL;                                \
                cbfn((enet)->bna->bnad);                                \
        }                                                               \
} while (0)
 896
 897static void bna_enet_cb_chld_stopped(void *arg);
 898static void bna_bfi_pause_set(struct bna_enet *enet);
 899
 900bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
 901                        enum bna_enet_event);
 902bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
 903                        enum bna_enet_event);
 904bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
 905                        enum bna_enet_event);
 906bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
 907                        enum bna_enet_event);
 908bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
 909                        enum bna_enet_event);
 910bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
 911                        enum bna_enet_event);
 912bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
 913                        enum bna_enet_event);
 914
/* stopped entry: flush any pending MTU and stop completion callbacks. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
        call_enet_mtu_cbfn(enet);
        call_enet_stop_cbfn(enet);
}
 921
/* Event handler for the stopped state. */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_START:
                bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
                break;

        case ENET_E_STOP:
                /* Already stopped; just acknowledge the caller. */
                call_enet_stop_cbfn(enet);
                break;

        case ENET_E_FAIL:
                /* No-op */
                break;

        case ENET_E_PAUSE_CFG:
                /* Config is cached in enet; applied on next start. */
                break;

        case ENET_E_MTU_CFG:
                call_enet_mtu_cbfn(enet);
                break;

        case ENET_E_CHLD_STOPPED:
                /**
                 * This event is received due to Ethport, Tx and Rx objects
                 * failing
                 */
                /* No-op */
                break;

        default:
                bfa_sm_fault(event);
        }
}
 957
/* pause_init_wait entry: push the current pause config to firmware. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
        bna_bfi_pause_set(enet);
}
 963
/*
 * Waiting for the initial pause-set firmware response before starting
 * the children. A pause reconfig arriving meanwhile is latched in
 * BNA_ENET_F_PAUSE_CHANGED and replayed on the response.
 */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_STOP:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
                break;

        case ENET_E_FAIL:
                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                break;

        case ENET_E_PAUSE_CFG:
                enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
                break;

        case ENET_E_MTU_CFG:
                /* No-op */
                break;

        case ENET_E_FWRESP_PAUSE:
                if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
                        /* Config changed while in flight; resend it. */
                        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
                        bna_bfi_pause_set(enet);
                } else {
                        bfa_fsm_set_state(enet, bna_enet_sm_started);
                        bna_enet_chld_start(enet);
                }
                break;

        default:
                bfa_sm_fault(event);
        }
}
1001
1002static void
1003bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
1004{
1005        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1006}
1007
/*
 * Stopping before children ever started; just drain the outstanding
 * pause-set response (or an IOC failure) and go to stopped.
 */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_FAIL:
        case ENET_E_FWRESP_PAUSE:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1022
/* started entry: complete any pending MTU-set callback. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
        /**
         * NOTE: Do not call bna_enet_chld_start() here, since it will be
         * inadvertently called during cfg_wait->started transition as well
         */
        call_enet_mtu_cbfn(enet);
}
1032
/* Event handler for the fully-started state. */
static void
bna_enet_sm_started(struct bna_enet *enet,
                        enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_STOP:
                bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
                break;

        case ENET_E_FAIL:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_PAUSE_CFG:
                /* Apply new pause config; wait for fw ack in cfg_wait. */
                bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
                bna_bfi_pause_set(enet);
                break;

        case ENET_E_MTU_CFG:
                /* MTU change requires an Rx stop/restart cycle. */
                bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
                bna_enet_rx_stop(enet);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1061
1062static void
1063bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
1064{
1065}
1066
1067static void
1068bna_enet_sm_cfg_wait(struct bna_enet *enet,
1069                        enum bna_enet_event event)
1070{
1071        switch (event) {
1072        case ENET_E_STOP:
1073                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1074                enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1075                bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
1076                break;
1077
1078        case ENET_E_FAIL:
1079                enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1080                enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1081                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1082                bna_enet_chld_fail(enet);
1083                break;
1084
1085        case ENET_E_PAUSE_CFG:
1086                enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
1087                break;
1088
1089        case ENET_E_MTU_CFG:
1090                enet->flags |= BNA_ENET_F_MTU_CHANGED;
1091                break;
1092
1093        case ENET_E_CHLD_STOPPED:
1094                bna_enet_rx_start(enet);
1095                /* Fall through */
1096        case ENET_E_FWRESP_PAUSE:
1097                if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
1098                        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1099                        bna_bfi_pause_set(enet);
1100                } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
1101                        enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1102                        bna_enet_rx_stop(enet);
1103                } else {
1104                        bfa_fsm_set_state(enet, bna_enet_sm_started);
1105                }
1106                break;
1107
1108        default:
1109                bfa_sm_fault(event);
1110        }
1111}
1112
1113static void
1114bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1115{
1116        enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1117        enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1118}
1119
/*
 * Stop requested while a config operation was in flight: drain it
 * (fw response or Rx-stop completion), then stop the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
                                enum bna_enet_event event)
{
        switch (event) {
        case ENET_E_FAIL:
                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
                bna_enet_chld_fail(enet);
                break;

        case ENET_E_FWRESP_PAUSE:
        case ENET_E_CHLD_STOPPED:
                bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1139
1140static void
1141bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
1142{
1143        bna_enet_chld_stop(enet);
1144}
1145
1146static void
1147bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1148                                enum bna_enet_event event)
1149{
1150        switch (event) {
1151        case ENET_E_FAIL:
1152                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1153                bna_enet_chld_fail(enet);
1154                break;
1155
1156        case ENET_E_CHLD_STOPPED:
1157                bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1158                break;
1159
1160        default:
1161                bfa_sm_fault(event);
1162        }
1163}
1164
1165static void
1166bna_bfi_pause_set(struct bna_enet *enet)
1167{
1168        struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
1169
1170        bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
1171                BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
1172        pause_req->mh.num_entries = htons(
1173        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
1174        pause_req->tx_pause = enet->pause_config.tx_pause;
1175        pause_req->rx_pause = enet->pause_config.rx_pause;
1176
1177        bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
1178                sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
1179        bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
1180}
1181
1182static void
1183bna_enet_cb_chld_stopped(void *arg)
1184{
1185        struct bna_enet *enet = (struct bna_enet *)arg;
1186
1187        bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1188}
1189
1190static void
1191bna_enet_init(struct bna_enet *enet, struct bna *bna)
1192{
1193        enet->bna = bna;
1194        enet->flags = 0;
1195        enet->mtu = 0;
1196        enet->type = BNA_ENET_T_REGULAR;
1197
1198        enet->stop_cbfn = NULL;
1199        enet->stop_cbarg = NULL;
1200
1201        enet->mtu_cbfn = NULL;
1202
1203        bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1204}
1205
1206static void
1207bna_enet_uninit(struct bna_enet *enet)
1208{
1209        enet->flags = 0;
1210
1211        enet->bna = NULL;
1212}
1213
1214static void
1215bna_enet_start(struct bna_enet *enet)
1216{
1217        enet->flags |= BNA_ENET_F_IOCETH_READY;
1218        if (enet->flags & BNA_ENET_F_ENABLED)
1219                bfa_fsm_send_event(enet, ENET_E_START);
1220}
1221
1222static void
1223bna_ioceth_cb_enet_stopped(void *arg)
1224{
1225        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1226
1227        bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1228}
1229
1230static void
1231bna_enet_stop(struct bna_enet *enet)
1232{
1233        enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1234        enet->stop_cbarg = &enet->bna->ioceth;
1235
1236        enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1237        bfa_fsm_send_event(enet, ENET_E_STOP);
1238}
1239
1240static void
1241bna_enet_fail(struct bna_enet *enet)
1242{
1243        enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1244        bfa_fsm_send_event(enet, ENET_E_FAIL);
1245}
1246
1247void
1248bna_enet_cb_tx_stopped(struct bna_enet *enet)
1249{
1250        bfa_wc_down(&enet->chld_stop_wc);
1251}
1252
1253void
1254bna_enet_cb_rx_stopped(struct bna_enet *enet)
1255{
1256        bfa_wc_down(&enet->chld_stop_wc);
1257}
1258
1259int
1260bna_enet_mtu_get(struct bna_enet *enet)
1261{
1262        return enet->mtu;
1263}
1264
1265void
1266bna_enet_enable(struct bna_enet *enet)
1267{
1268        if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1269                return;
1270
1271        enet->flags |= BNA_ENET_F_ENABLED;
1272
1273        if (enet->flags & BNA_ENET_F_IOCETH_READY)
1274                bfa_fsm_send_event(enet, ENET_E_START);
1275}
1276
1277void
1278bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1279                 void (*cbfn)(void *))
1280{
1281        if (type == BNA_SOFT_CLEANUP) {
1282                (*cbfn)(enet->bna->bnad);
1283                return;
1284        }
1285
1286        enet->stop_cbfn = cbfn;
1287        enet->stop_cbarg = enet->bna->bnad;
1288
1289        enet->flags &= ~BNA_ENET_F_ENABLED;
1290
1291        bfa_fsm_send_event(enet, ENET_E_STOP);
1292}
1293
1294void
1295bna_enet_pause_config(struct bna_enet *enet,
1296                      struct bna_pause_config *pause_config)
1297{
1298        enet->pause_config = *pause_config;
1299
1300        bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1301}
1302
1303void
1304bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1305                 void (*cbfn)(struct bnad *))
1306{
1307        enet->mtu = mtu;
1308
1309        enet->mtu_cbfn = cbfn;
1310
1311        bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1312}
1313
1314void
1315bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
1316{
1317        bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
1318}
1319
1320/* IOCETH */
1321
/*
 * Enable mailbox interrupts at both the bnad and bna levels.
 * NOTE(review): intr_status is read but not otherwise used here --
 * presumably the read clears/acknowledges pending status; confirm
 * against bna_intr_status_get()'s definition.
 */
#define enable_mbox_intr(_ioceth)                                       \
do {                                                                    \
        u32 intr_status;                                                \
        bna_intr_status_get((_ioceth)->bna, intr_status);               \
        bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);                 \
        bna_mbox_intr_enable((_ioceth)->bna);                           \
} while (0)
1329
/* Disable mailbox interrupts; reverse order of enable_mbox_intr(). */
#define disable_mbox_intr(_ioceth)                                      \
do {                                                                    \
        bna_mbox_intr_disable((_ioceth)->bna);                          \
        bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);                \
} while (0)
1335
/* Invoke and clear the one-shot ioceth stop-complete callback. */
#define call_ioceth_stop_cbfn(_ioceth)                                  \
do {                                                                    \
        if ((_ioceth)->stop_cbfn) {                                     \
                void (*cbfn)(struct bnad *);                            \
                struct bnad *cbarg;                                     \
                cbfn = (_ioceth)->stop_cbfn;                            \
                cbarg = (_ioceth)->stop_cbarg;                          \
                (_ioceth)->stop_cbfn = NULL;                            \
                (_ioceth)->stop_cbarg = NULL;                           \
                cbfn(cbarg);                                            \
        }                                                               \
} while (0)
1348
/* Stats module needs no uninit work; kept for symmetry with init. */
#define bna_stats_mod_uninit(_stats_mod)                                \
do {                                                                    \
} while (0)
1352
/* Mark the stats module ready to issue firmware stats requests. */
#define bna_stats_mod_start(_stats_mod)                                 \
do {                                                                    \
        (_stats_mod)->ioc_ready = true;                                 \
} while (0)
1357
/* Mark the stats module not-ready; new stats requests must not start. */
#define bna_stats_mod_stop(_stats_mod)                                  \
do {                                                                    \
        (_stats_mod)->ioc_ready = false;                                \
} while (0)
1362
/* IOC failure: clear readiness and any in-flight get/clear busy flags. */
#define bna_stats_mod_fail(_stats_mod)                                  \
do {                                                                    \
        (_stats_mod)->ioc_ready = false;                                \
        (_stats_mod)->stats_get_busy = false;                           \
        (_stats_mod)->stats_clr_busy = false;                           \
} while (0)
1369
1370static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1371
1372bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1373                        enum bna_ioceth_event);
1374bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1375                        enum bna_ioceth_event);
1376bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1377                        enum bna_ioceth_event);
1378bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1379                        enum bna_ioceth_event);
1380bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1381                        enum bna_ioceth_event);
1382bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1383                        enum bna_ioceth_event);
1384bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1385                        enum bna_ioceth_event);
1386bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1387                        enum bna_ioceth_event);
1388
1389static void
1390bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
1391{
1392        call_ioceth_stop_cbfn(ioceth);
1393}
1394
/* Event handler for the stopped state. */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
                        enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_ENABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
                bfa_nw_ioc_enable(&ioceth->ioc);
                break;

        case IOCETH_E_DISABLE:
                /* Re-enter stopped so the entry action fires the cbfn. */
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
                break;

        case IOCETH_E_IOC_RESET:
                enable_mbox_intr(ioceth);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1422
1423static void
1424bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
1425{
1426        /**
1427         * Do not call bfa_nw_ioc_enable() here. It must be called in the
1428         * previous state due to failed -> ioc_ready_wait transition.
1429         */
1430}
1431
/* Waiting for the IOC to complete its enable sequence. */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_IOC_RESET:
                enable_mbox_intr(ioceth);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        case IOCETH_E_IOC_READY:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1459
1460static void
1461bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
1462{
1463        bna_bfi_attr_get(ioceth);
1464}
1465
/* Waiting for the firmware attribute-get response. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                /* Must drain the outstanding response before disabling. */
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
                break;

        case IOCETH_E_IOC_FAILED:
                disable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
                break;

        case IOCETH_E_ENET_ATTR_RESP:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1488
1489static void
1490bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
1491{
1492        bna_enet_start(&ioceth->bna->enet);
1493        bna_stats_mod_start(&ioceth->bna->stats_mod);
1494        bnad_cb_ioceth_ready(ioceth->bna->bnad);
1495}
1496
1497static void
1498bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
1499{
1500        switch (event) {
1501        case IOCETH_E_DISABLE:
1502                bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
1503                break;
1504
1505        case IOCETH_E_IOC_FAILED:
1506                disable_mbox_intr(ioceth);
1507                bna_enet_fail(&ioceth->bna->enet);
1508                bna_stats_mod_fail(&ioceth->bna->stats_mod);
1509                bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1510                break;
1511
1512        default:
1513                bfa_sm_fault(event);
1514        }
1515}
1516
1517static void
1518bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
1519{
1520}
1521
/*
 * Disable requested while the attr-get request was outstanding: wait
 * for its response (or an IOC failure), then disable the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_IOC_FAILED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                disable_mbox_intr(ioceth);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_ENET_ATTR_RESP:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1542
1543static void
1544bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
1545{
1546        bna_stats_mod_stop(&ioceth->bna->stats_mod);
1547        bna_enet_stop(&ioceth->bna->enet);
1548}
1549
/* Waiting for the enet to finish stopping before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
                                enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_IOC_FAILED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                disable_mbox_intr(ioceth);
                bna_enet_fail(&ioceth->bna->enet);
                bna_stats_mod_fail(&ioceth->bna->stats_mod);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_ENET_STOPPED:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
1572
1573static void
1574bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
1575{
1576}
1577
1578static void
1579bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
1580                                enum bna_ioceth_event event)
1581{
1582        switch (event) {
1583        case IOCETH_E_IOC_DISABLED:
1584                disable_mbox_intr(ioceth);
1585                bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1586                break;
1587
1588        case IOCETH_E_ENET_STOPPED:
1589                /* This event is received due to enet failing */
1590                /* No-op */
1591                break;
1592
1593        default:
1594                bfa_sm_fault(event);
1595        }
1596}
1597
1598static void
1599bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
1600{
1601        bnad_cb_ioceth_failed(ioceth->bna->bnad);
1602}
1603
/* Failed state: wait for an IOC reset to recover, or a disable. */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
                        enum bna_ioceth_event event)
{
        switch (event) {
        case IOCETH_E_DISABLE:
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
                bfa_nw_ioc_disable(&ioceth->ioc);
                break;

        case IOCETH_E_IOC_RESET:
                /* IOC recovered; retry the bring-up sequence. */
                enable_mbox_intr(ioceth);
                bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
                break;

        case IOCETH_E_IOC_FAILED:
                /* Already failed. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
1626
1627static void
1628bna_bfi_attr_get(struct bna_ioceth *ioceth)
1629{
1630        struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
1631
1632        bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
1633                BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
1634        attr_req->mh.num_entries = htons(
1635        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
1636        bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
1637                sizeof(struct bfi_enet_attr_req), &attr_req->mh);
1638        bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
1639}
1640
1641/* IOC callback functions */
1642
1643static void
1644bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1645{
1646        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1647
1648        if (error)
1649                bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1650        else
1651                bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1652}
1653
1654static void
1655bna_cb_ioceth_disable(void *arg)
1656{
1657        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658
1659        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1660}
1661
1662static void
1663bna_cb_ioceth_hbfail(void *arg)
1664{
1665        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1666
1667        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1668}
1669
1670static void
1671bna_cb_ioceth_reset(void *arg)
1672{
1673        struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1674
1675        bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1676}
1677
/* IOC callback table registered with bfa_nw_ioc_attach(). */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
        .enable_cbfn = bna_cb_ioceth_enable,
        .disable_cbfn = bna_cb_ioceth_disable,
        .hbfail_cbfn = bna_cb_ioceth_hbfail,
        .reset_cbfn = bna_cb_ioceth_reset
};
1684
/*
 * Seed default attribute values; fw_query_complete stays false until the
 * firmware attribute response overrides these defaults.
 */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
        ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
        ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
        ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
        ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
        ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
        ioceth->attr.fw_query_complete = false;
}
1694
1695static void
1696bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
1697                struct bna_res_info *res_info)
1698{
1699        u64 dma;
1700        u8 *kva;
1701
1702        ioceth->bna = bna;
1703
1704        /**
1705         * Attach IOC and claim:
1706         *      1. DMA memory for IOC attributes
1707         *      2. Kernel memory for FW trace
1708         */
1709        bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
1710        bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
1711
1712        BNA_GET_DMA_ADDR(
1713                &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1714        kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
1715        bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
1716
1717        kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1718        bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
1719
1720        /**
1721         * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1722         * DMA memory.
1723         */
1724        BNA_GET_DMA_ADDR(
1725                &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1726        kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1727        bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
1728        bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1729        kva += bfa_nw_cee_meminfo();
1730        dma += bfa_nw_cee_meminfo();
1731
1732        bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
1733        bfa_nw_flash_memclaim(&bna->flash, kva, dma);
1734        kva += bfa_nw_flash_meminfo();
1735        dma += bfa_nw_flash_meminfo();
1736
1737        bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
1738        bfa_msgq_memclaim(&bna->msgq, kva, dma);
1739        bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
1740        kva += bfa_msgq_meminfo();
1741        dma += bfa_msgq_meminfo();
1742
1743        ioceth->stop_cbfn = NULL;
1744        ioceth->stop_cbarg = NULL;
1745
1746        bna_attr_init(ioceth);
1747
1748        bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1749}
1750
1751static void
1752bna_ioceth_uninit(struct bna_ioceth *ioceth)
1753{
1754        bfa_nw_ioc_detach(&ioceth->ioc);
1755
1756        ioceth->bna = NULL;
1757}
1758
1759void
1760bna_ioceth_enable(struct bna_ioceth *ioceth)
1761{
1762        if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1763                bnad_cb_ioceth_ready(ioceth->bna->bnad);
1764                return;
1765        }
1766
1767        if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1768                bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
1769}
1770
1771void
1772bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1773{
1774        if (type == BNA_SOFT_CLEANUP) {
1775                bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1776                return;
1777        }
1778
1779        ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1780        ioceth->stop_cbarg = ioceth->bna->bnad;
1781
1782        bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1783}
1784
1785static void
1786bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1787                  struct bna_res_info *res_info)
1788{
1789        int i;
1790
1791        ucam_mod->ucmac = (struct bna_mac *)
1792        res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1793
1794        INIT_LIST_HEAD(&ucam_mod->free_q);
1795        for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
1796                list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1797
1798        /* A separate queue to allow synchronous setting of a list of MACs */
1799        INIT_LIST_HEAD(&ucam_mod->del_q);
1800        for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
1801                list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1802
1803        ucam_mod->bna = bna;
1804}
1805
/* Tear down the unicast CAM module. Only the back-pointer is cleared;
 * the MAC array memory belongs to res_info and is freed by the caller.
 */
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	ucam_mod->bna = NULL;
}
1811
1812static void
1813bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1814                  struct bna_res_info *res_info)
1815{
1816        int i;
1817
1818        mcam_mod->mcmac = (struct bna_mac *)
1819        res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1820
1821        INIT_LIST_HEAD(&mcam_mod->free_q);
1822        for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
1823                list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1824
1825        mcam_mod->mchandle = (struct bna_mcam_handle *)
1826        res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1827
1828        INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1829        for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
1830                list_add_tail(&mcam_mod->mchandle[i].qe,
1831                              &mcam_mod->free_handle_q);
1832
1833        /* A separate queue to allow synchronous setting of a list of MACs */
1834        INIT_LIST_HEAD(&mcam_mod->del_q);
1835        for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
1836                list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1837
1838        mcam_mod->bna = bna;
1839}
1840
/* Tear down the multicast CAM module. Only the back-pointer is cleared;
 * the MAC/handle array memory belongs to res_info and is freed by the caller.
 */
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	mcam_mod->bna = NULL;
}
1846
/* Build and post a STATS_GET request to firmware over the message queue.
 * Marks the stats module busy until the firmware response arrives (the
 * flag is checked by bna_hw_stats_get() to reject overlapping requests).
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	/* Set busy before posting so a racing caller sees it. */
	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	/* Request all stats for every active Tx/Rx function (rid masks). */
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	/* Firmware DMAs the stats into this host buffer. */
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1868
/* Fill in the base (non per-module) resource requirements so the driver
 * can allocate them before bna_init() is called. Each entry describes one
 * memory resource: DMA vs. kernel-virtual, chunk count, and total length.
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE (CEE + flash + msgq, claimed in
	 * that order by bna_ioceth_init(), hence the summed length).
	 */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving the firmware trace buffer */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats (firmware DMAs a
	 * struct bfi_enet_stats into it; see bna_bfi_stats_get()).
	 */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1902
/* Fill in the per-module resource requirements, sized from the attributes
 * previously queried from firmware (bna->ioceth.attr). All entries are
 * kernel-virtual (KVA) object arrays handed to the respective module's
 * init routine via bna_mod_init().
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module
	 * (two RxQs per Rx path, hence num_rxp * 2).
	 */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module
	 * (doubled: one half for free_q, one half for del_q; see
	 * bna_ucam_mod_init()).
	 */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module
	 * (doubled for the same free_q/del_q split as unicast).
	 */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1972
/* First-stage BNA initialization: wire up the driver/PCI context, claim
 * the stats DMA buffer, map device registers, and initialize the ioceth
 * (which in turn attaches diag, cee, sfp, phy_port and msgq), enet and
 * ethport objects. Per-module init happens later in bna_mod_init(), once
 * firmware attributes are known.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* Stats buffer allocated per bna_res_req(); firmware DMAs
	 * hardware stats here (see bna_bfi_stats_get()).
	 */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
1995
/* Second-stage BNA initialization: set up the Tx/Rx/ucam/mcam modules
 * from the per-module resources sized by bna_mod_res_req(). Sets
 * BNA_MOD_F_INIT_DONE so bna_uninit() knows to tear these down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	/* No Rx function owns default-mode or promiscuous mode yet. */
	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2012
/* Tear down the BNA in reverse order of initialization: per-module state
 * first (only if bna_mod_init() completed, as tracked by
 * BNA_MOD_F_INIT_DONE), then the base objects, ioceth last.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2032
2033int
2034bna_num_txq_set(struct bna *bna, int num_txq)
2035{
2036        if (bna->ioceth.attr.fw_query_complete &&
2037                (num_txq <= bna->ioceth.attr.num_txq)) {
2038                bna->ioceth.attr.num_txq = num_txq;
2039                return BNA_CB_SUCCESS;
2040        }
2041
2042        return BNA_CB_FAIL;
2043}
2044
2045int
2046bna_num_rxp_set(struct bna *bna, int num_rxp)
2047{
2048        if (bna->ioceth.attr.fw_query_complete &&
2049                (num_rxp <= bna->ioceth.attr.num_rxp)) {
2050                bna->ioceth.attr.num_rxp = num_rxp;
2051                return BNA_CB_SUCCESS;
2052        }
2053
2054        return BNA_CB_FAIL;
2055}
2056
2057struct bna_mac *
2058bna_cam_mod_mac_get(struct list_head *head)
2059{
2060        struct bna_mac *mac;
2061
2062        mac = list_first_entry_or_null(head, struct bna_mac, qe);
2063        if (mac)
2064                list_del(&mac->qe);
2065
2066        return mac;
2067}
2068
2069struct bna_mcam_handle *
2070bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2071{
2072        struct bna_mcam_handle *handle;
2073
2074        handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
2075                                          struct bna_mcam_handle, qe);
2076        if (handle)
2077                list_del(&handle->qe);
2078
2079        return handle;
2080}
2081
/* Return a multicast handle to the module's free list.
 * Caller must have already unlinked it (e.g. via bna_mcam_mod_handle_get()).
 */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2088
2089void
2090bna_hw_stats_get(struct bna *bna)
2091{
2092        if (!bna->stats_mod.ioc_ready) {
2093                bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2094                return;
2095        }
2096        if (bna->stats_mod.stats_get_busy) {
2097                bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2098                return;
2099        }
2100
2101        bna_bfi_stats_get(bna);
2102}
2103