linux/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"

/* IB */
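/*
 * Cache the new timeout and precompute the doorbell ack word used to
 * re-arm the IB: interrupts acknowledged with the coalescing timeout
 * applied and an event count of zero.
 */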
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
        ib->coalescing_timeo = coalescing_timeo;
        ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
                                (u32)ib->coalescing_timeo, 0);
}

/* RXF */

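/*
 * A soft reset touches no hardware: it marks every VLAN filter block and
 * the VLAN strip setting as pending so the whole configuration is
 * replayed the next time the RXF configuration is applied.
 */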
#define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
do {                                                                    \
        (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;           \
        (rxf)->vlan_strip_pending = true;                               \
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
do {                                                                    \
        if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                  \
                (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |           \
                                BNA_RSS_F_CFG_PENDING |                 \
                                BNA_RSS_F_STATUS_PENDING);              \
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
                                        enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
                                        enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
                                        enum bna_cleanup_type cleanup);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
                        enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
                        enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
                        enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
                        enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
                        enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
                        enum bna_rxf_event);

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
        call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_START:
                if (rxf->flags & BNA_RXF_F_PAUSED) {
                        bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
                        call_rxf_start_cbfn(rxf);
                } else
                        bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
                break;

        case RXF_E_STOP:
                call_rxf_stop_cbfn(rxf);
                break;

        case RXF_E_FAIL:
                /* No-op */
                break;

        case RXF_E_CONFIG:
                call_rxf_cam_fltr_cbfn(rxf);
                break;

        case RXF_E_PAUSE:
                rxf->flags |= BNA_RXF_F_PAUSED;
                call_rxf_pause_cbfn(rxf);
                break;

        case RXF_E_RESUME:
                rxf->flags &= ~BNA_RXF_F_PAUSED;
                call_rxf_resume_cbfn(rxf);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
        call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_STOP:
        case RXF_E_FAIL:
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
                break;

        case RXF_E_CONFIG:
                call_rxf_cam_fltr_cbfn(rxf);
                break;

        case RXF_E_RESUME:
                rxf->flags &= ~BNA_RXF_F_PAUSED;
                bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
        if (!bna_rxf_cfg_apply(rxf)) {
                /* No more pending config updates */
                bfa_fsm_set_state(rxf, bna_rxf_sm_started);
        }
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_STOP:
                bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
                break;

        case RXF_E_FAIL:
                bna_rxf_cfg_reset(rxf);
                call_rxf_start_cbfn(rxf);
                call_rxf_cam_fltr_cbfn(rxf);
                call_rxf_resume_cbfn(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
                break;

        case RXF_E_CONFIG:
                /* No-op */
                break;

        case RXF_E_PAUSE:
                rxf->flags |= BNA_RXF_F_PAUSED;
                call_rxf_start_cbfn(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
                break;

        case RXF_E_FW_RESP:
                if (!bna_rxf_cfg_apply(rxf)) {
                        /* No more pending config updates */
                        bfa_fsm_set_state(rxf, bna_rxf_sm_started);
                }
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
        call_rxf_start_cbfn(rxf);
        call_rxf_cam_fltr_cbfn(rxf);
        call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_STOP:
        case RXF_E_FAIL:
                bna_rxf_cfg_reset(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
                break;

        case RXF_E_CONFIG:
                bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
                break;

        case RXF_E_PAUSE:
                rxf->flags |= BNA_RXF_F_PAUSED;
                if (!bna_rxf_fltr_clear(rxf))
                        bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
                else
                        bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_FAIL:
                bna_rxf_cfg_reset(rxf);
                call_rxf_pause_cbfn(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
                break;

        case RXF_E_FW_RESP:
                if (!bna_rxf_fltr_clear(rxf)) {
                        /* No more pending CAM entries to clear */
                        bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
                }
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
        switch (event) {
        case RXF_E_FAIL:
        case RXF_E_FW_RESP:
                bna_rxf_cfg_reset(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
                break;

        default:
                bfa_sm_fault(event);
        }
}

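/*
 * The bna_bfi_*_req() helpers below share one pattern: build the command
 * in rxf->bfi_enet_cmd, stamp the message header with this RXF's rx id,
 * and post it to the firmware message queue. The completion comes back
 * later as an RXF_E_FW_RESP event to the state machine.
 */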
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
                enum bfi_enet_h2i_msgs req_type)
{
        struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
        memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_ucast_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
        struct bfi_enet_mcast_add_req *req =
                &rxf->bfi_enet_cmd.mcast_add_req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
                0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
        memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_mcast_add_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
        struct bfi_enet_mcast_del_req *req =
                &rxf->bfi_enet_cmd.mcast_del_req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
                0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
        req->handle = htons(handle);
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_mcast_del_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
        struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
        req->enable = status;
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_enable_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
        struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
        req->enable = status;
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_enable_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
        struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
        int i;
        int j;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
        req->block_idx = block_idx;
        for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
                j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
                if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
                        req->bit_mask[i] =
                                htonl(rxf->vlan_filter_table[j]);
                else
                        req->bit_mask[i] = 0xFFFFFFFF;
        }
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
        struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
        req->enable = rxf->vlan_strip_status;
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_enable_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
        struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
        req->size = htons(rxf->rit_size);
        memcpy(&req->table[0], rxf->rit, rxf->rit_size);
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_rit_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
        struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
        int i;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
        req->cfg.type = rxf->rss_cfg.hash_type;
        req->cfg.mask = rxf->rss_cfg.hash_mask;
        for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
                req->cfg.key[i] =
                        htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
        struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
                BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
        req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
        req->enable = rxf->rss_status;
        bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
                sizeof(struct bfi_enet_enable_req), &req->mh);
        bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
        struct bna_mac *mac;
        struct list_head *qe;

        list_for_each(qe, &rxf->mcast_active_q) {
                mac = (struct bna_mac *)qe;
                if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
                        return mac;
        }

        list_for_each(qe, &rxf->mcast_pending_del_q) {
                mac = (struct bna_mac *)qe;
                if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
                        return mac;
        }

        return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
        struct bna_mcam_handle *mchandle;
        struct list_head *qe;

        list_for_each(qe, &rxf->mcast_handle_q) {
                mchandle = (struct bna_mcam_handle *)qe;
                if (mchandle->handle == handle)
                        return mchandle;
        }

        return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
        struct bna_mac *mcmac;
        struct bna_mcam_handle *mchandle;

        mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
        mchandle = bna_rxf_mchandle_get(rxf, handle);
        if (mchandle == NULL) {
                mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
                mchandle->handle = handle;
                mchandle->refcnt = 0;
                list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
        }
        mchandle->refcnt++;
        mcmac->handle = mchandle;
}

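/*
 * Multicast CAM handles are reference counted; several local mcast
 * entries may share one firmware handle. A delete request is posted
 * (and 1 returned) only when the last reference drops under a hard
 * cleanup.
 */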
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
                enum bna_cleanup_type cleanup)
{
        struct bna_mcam_handle *mchandle;
        int ret = 0;

        mchandle = mac->handle;
        if (mchandle == NULL)
                return ret;

        mchandle->refcnt--;
        if (mchandle->refcnt == 0) {
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_mcast_del_req(rxf, mchandle->handle);
                        ret = 1;
                }
                list_del(&mchandle->qe);
                bfa_q_qe_init(&mchandle->qe);
                bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
        }
        mac->handle = NULL;

        return ret;
}

static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
        struct bna_mac *mac = NULL;
        struct list_head *qe;
        int ret;

        /* Delete multicast entries previously added */
        while (!list_empty(&rxf->mcast_pending_del_q)) {
                bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
                if (ret)
                        return ret;
        }

        /* Add multicast entries */
        if (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                list_add_tail(&mac->qe, &rxf->mcast_active_q);
                bna_bfi_mcast_add_req(rxf, mac);
                return 1;
        }

        return 0;
}

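/*
 * Find the lowest-numbered VLAN block with its pending bit set, clear
 * that bit and push just that block to the firmware. One command per
 * block keeps the cfg_wait state machine in lock-step with firmware
 * responses.
 */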
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
        u8 vlan_pending_bitmask;
        int block_idx = 0;

        if (rxf->vlan_pending_bitmask) {
                vlan_pending_bitmask = rxf->vlan_pending_bitmask;
                while (!(vlan_pending_bitmask & 0x1)) {
                        block_idx++;
                        vlan_pending_bitmask >>= 1;
                }
                rxf->vlan_pending_bitmask &= ~(1 << block_idx);
                bna_bfi_rx_vlan_filter_set(rxf, block_idx);
                return 1;
        }

        return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
        struct list_head *qe;
        struct bna_mac *mac;
        int ret;

        /* Throw away delete pending mcast entries */
        while (!list_empty(&rxf->mcast_pending_del_q)) {
                bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                ret = bna_rxf_mcast_del(rxf, mac, cleanup);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
                if (ret)
                        return ret;
        }

        /* Move active mcast entries to pending_add_q */
        while (!list_empty(&rxf->mcast_active_q)) {
                bfa_q_deq(&rxf->mcast_active_q, &qe);
                bfa_q_qe_init(qe);
                list_add_tail(qe, &rxf->mcast_pending_add_q);
                mac = (struct bna_mac *)qe;
                if (bna_rxf_mcast_del(rxf, mac, cleanup))
                        return 1;
        }

        return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
        if (rxf->rss_pending) {
                if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
                        rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
                        bna_bfi_rit_cfg(rxf);
                        return 1;
                }

                if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
                        rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
                        bna_bfi_rss_cfg(rxf);
                        return 1;
                }

                if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
                        rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
                        bna_bfi_rss_enable(rxf);
                        return 1;
                }
        }

        return 0;
}

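/*
 * Apply pending configuration one firmware command at a time, in a fixed
 * order. Returning 1 means a command was posted and the caller must wait
 * for RXF_E_FW_RESP before applying more; 0 means nothing is pending.
 */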
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
        if (bna_rxf_ucast_cfg_apply(rxf))
                return 1;

        if (bna_rxf_mcast_cfg_apply(rxf))
                return 1;

        if (bna_rxf_promisc_cfg_apply(rxf))
                return 1;

        if (bna_rxf_allmulti_cfg_apply(rxf))
                return 1;

        if (bna_rxf_vlan_cfg_apply(rxf))
                return 1;

        if (bna_rxf_vlan_strip_cfg_apply(rxf))
                return 1;

        if (bna_rxf_rss_cfg_apply(rxf))
                return 1;

        return 0;
}

/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
        if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
                return 1;

        if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
                return 1;

        if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
                return 1;

        if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
                return 1;

        return 0;
}

static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
        bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
        bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
        bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
        bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
        bna_rxf_vlan_cfg_soft_reset(rxf);
        bna_rxf_rss_cfg_soft_reset(rxf);
}

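/* Build the RSS indirection table: one CQ id per configured Rx path. */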
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
        struct bna_rx *rx = rxf->rx;
        struct bna_rxp *rxp;
        struct list_head *qe;
        int offset = 0;

        rxf->rit_size = rit_size;
        list_for_each(qe, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe;
                rxf->rit[offset] = rxp->cq.ccb->id;
                offset++;
        }
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
        bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                        struct bfi_msgq_mhdr *msghdr)
{
        struct bfi_enet_mcast_add_req *req =
                &rxf->bfi_enet_cmd.mcast_add_req;
        struct bfi_enet_mcast_add_rsp *rsp =
                (struct bfi_enet_mcast_add_rsp *)msghdr;

        bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
                ntohs(rsp->handle));
        bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
                struct bna_rx *rx,
                struct bna_rx_config *q_config,
                struct bna_res_info *res_info)
{
        rxf->rx = rx;

        INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
        INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
        rxf->ucast_pending_set = 0;
        rxf->ucast_active_set = 0;
        INIT_LIST_HEAD(&rxf->ucast_active_q);
        rxf->ucast_pending_mac = NULL;

        INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
        INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
        INIT_LIST_HEAD(&rxf->mcast_active_q);
        INIT_LIST_HEAD(&rxf->mcast_handle_q);

        if (q_config->paused)
                rxf->flags |= BNA_RXF_F_PAUSED;

        rxf->rit = (u8 *)
                res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
        bna_rit_init(rxf, q_config->num_paths);

        rxf->rss_status = q_config->rss_status;
        if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
                rxf->rss_cfg = q_config->rss_config;
                rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
                rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
                rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
        }

        rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
        memset(rxf->vlan_filter_table, 0,
                        (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
        rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
        rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

        rxf->vlan_strip_status = q_config->vlan_strip_status;

        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
        struct bna_mac *mac;

        rxf->ucast_pending_set = 0;
        rxf->ucast_active_set = 0;

        while (!list_empty(&rxf->ucast_pending_add_q)) {
                bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
                bfa_q_qe_init(&mac->qe);
                bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
        }

        if (rxf->ucast_pending_mac) {
                bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
                bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
                        rxf->ucast_pending_mac);
                rxf->ucast_pending_mac = NULL;
        }

        while (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
                bfa_q_qe_init(&mac->qe);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
        }

        rxf->rxmode_pending = 0;
        rxf->rxmode_pending_bitmask = 0;
        if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
                rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
        if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
                rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

        rxf->rss_pending = 0;
        rxf->vlan_strip_pending = false;

        rxf->flags = 0;

        rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
        bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
        rxf->start_cbfn = bna_rx_cb_rxf_started;
        rxf->start_cbarg = rxf->rx;
        bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
        bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
        rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
        rxf->stop_cbarg = rxf->rx;
        bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
        bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *))
{
        struct bna_rxf *rxf = &rx->rxf;

        if (rxf->ucast_pending_mac == NULL) {
                rxf->ucast_pending_mac =
                                bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
                if (rxf->ucast_pending_mac == NULL)
                        return BNA_CB_UCAST_CAM_FULL;
                bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
        }

        memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
        rxf->ucast_pending_set = 1;
        rxf->cam_fltr_cbfn = cbfn;
        rxf->cam_fltr_cbarg = rx->bna->bnad;

        bfa_fsm_send_event(rxf, RXF_E_CONFIG);

        return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
                 void (*cbfn)(struct bnad *, struct bna_rx *))
{
        struct bna_rxf *rxf = &rx->rxf;
        struct bna_mac *mac;

        /* Check if already added or pending addition */
        if (bna_mac_find(&rxf->mcast_active_q, addr) ||
                bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
                if (cbfn)
                        cbfn(rx->bna->bnad, rx);
                return BNA_CB_SUCCESS;
        }

        mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
        if (mac == NULL)
                return BNA_CB_MCAST_LIST_FULL;
        bfa_q_qe_init(&mac->qe);
        memcpy(mac->addr, addr, ETH_ALEN);
        list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

        rxf->cam_fltr_cbfn = cbfn;
        rxf->cam_fltr_cbarg = rx->bna->bnad;

        bfa_fsm_send_event(rxf, RXF_E_CONFIG);

        return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
                     void (*cbfn)(struct bnad *, struct bna_rx *))
{
        struct bna_rxf *rxf = &rx->rxf;
        struct list_head list_head;
        struct list_head *qe;
        u8 *mcaddr;
        struct bna_mac *mac;
        int i;

        /* Allocate nodes */
        INIT_LIST_HEAD(&list_head);
        for (i = 0, mcaddr = mclist; i < count; i++) {
                mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
                if (mac == NULL)
                        goto err_return;
                bfa_q_qe_init(&mac->qe);
                memcpy(mac->addr, mcaddr, ETH_ALEN);
                list_add_tail(&mac->qe, &list_head);

                mcaddr += ETH_ALEN;
        }

        /* Purge the pending_add_q */
        while (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
        }

        /* Schedule active_q entries for deletion */
        while (!list_empty(&rxf->mcast_active_q)) {
                bfa_q_deq(&rxf->mcast_active_q, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
        }

        /* Add the new entries */
        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
        }

        rxf->cam_fltr_cbfn = cbfn;
        rxf->cam_fltr_cbarg = rx->bna->bnad;
        bfa_fsm_send_event(rxf, RXF_E_CONFIG);

        return BNA_CB_SUCCESS;

err_return:
        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
        }

        return BNA_CB_MCAST_LIST_FULL;
}

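/*
 * vlan_id is decomposed twice: into a word index plus bit for the local
 * filter table, and into a block (group) number for the pending bitmask
 * that tells the apply path which blocks to re-send to the firmware.
 */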
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
        struct bna_rxf *rxf = &rx->rxf;
        int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
        int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
        int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

        rxf->vlan_filter_table[index] |= bit;
        if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
                rxf->vlan_pending_bitmask |= (1 << group_id);
                bfa_fsm_send_event(rxf, RXF_E_CONFIG);
        }
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
        struct bna_rxf *rxf = &rx->rxf;
        int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
        int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
        int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

        rxf->vlan_filter_table[index] &= ~bit;
        if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
                rxf->vlan_pending_bitmask |= (1 << group_id);
                bfa_fsm_send_event(rxf, RXF_E_CONFIG);
        }
}

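/*
 * Unicast apply order: flush pending deletes first, then (re)set the
 * default MAC, then add any additional addresses. Like the other apply
 * routines, at most one firmware command is posted per call.
 */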
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
        struct bna_mac *mac = NULL;
        struct list_head *qe;

        /* Delete MAC addresses previously added */
        if (!list_empty(&rxf->ucast_pending_del_q)) {
                bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
                bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
                return 1;
        }

        /* Set default unicast MAC */
        if (rxf->ucast_pending_set) {
                rxf->ucast_pending_set = 0;
                memcpy(rxf->ucast_active_mac.addr,
                        rxf->ucast_pending_mac->addr, ETH_ALEN);
                rxf->ucast_active_set = 1;
                bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
                        BFI_ENET_H2I_MAC_UCAST_SET_REQ);
                return 1;
        }

        /* Add additional MAC entries */
        if (!list_empty(&rxf->ucast_pending_add_q)) {
                bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                list_add_tail(&mac->qe, &rxf->ucast_active_q);
                bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
                return 1;
        }

        return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
        struct list_head *qe;
        struct bna_mac *mac;

        /* Throw away delete pending ucast entries */
        while (!list_empty(&rxf->ucast_pending_del_q)) {
                bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
                bfa_q_qe_init(qe);
                mac = (struct bna_mac *)qe;
                if (cleanup == BNA_SOFT_CLEANUP)
                        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
                else {
                        bna_bfi_ucast_req(rxf, mac,
                                BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
                        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
                        return 1;
                }
        }

        /* Move active ucast entries to pending_add_q */
        while (!list_empty(&rxf->ucast_active_q)) {
                bfa_q_deq(&rxf->ucast_active_q, &qe);
                bfa_q_qe_init(qe);
                list_add_tail(qe, &rxf->ucast_pending_add_q);
                if (cleanup == BNA_HARD_CLEANUP) {
                        mac = (struct bna_mac *)qe;
                        bna_bfi_ucast_req(rxf, mac,
                                BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
                        return 1;
                }
        }

        if (rxf->ucast_active_set) {
                rxf->ucast_pending_set = 1;
                rxf->ucast_active_set = 0;
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
                                BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
                        return 1;
                }
        }

        return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
        struct bna *bna = rxf->rx->bna;

        /* Enable/disable promiscuous mode */
        if (is_promisc_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                /* move promisc configuration from pending -> active */
                promisc_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active |= BNA_RXMODE_PROMISC;
                bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
                return 1;
        } else if (is_promisc_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                /* move promisc configuration from pending -> active */
                promisc_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
                bna->promisc_rid = BFI_INVALID_RID;
                bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
                return 1;
        }

        return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
        struct bna *bna = rxf->rx->bna;

        /* Clear pending promisc mode disable */
        if (is_promisc_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                promisc_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
                bna->promisc_rid = BFI_INVALID_RID;
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
                        return 1;
                }
        }

        /* Move promisc mode config from active -> pending */
        if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
                promisc_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
                        return 1;
                }
        }

        return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
        /* Enable/disable allmulti mode */
        if (is_allmulti_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                /* move allmulti configuration from pending -> active */
                allmulti_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
                bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
                return 1;
        } else if (is_allmulti_disable(rxf->rxmode_pending,
                                        rxf->rxmode_pending_bitmask)) {
                /* move allmulti configuration from pending -> active */
                allmulti_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
                bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
                return 1;
        }

        return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
        /* Clear pending allmulti mode disable */
        if (is_allmulti_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                allmulti_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
                        return 1;
                }
        }

        /* Move allmulti mode config from active -> pending */
        if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
                allmulti_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
                if (cleanup == BNA_HARD_CLEANUP) {
                        bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
                        return 1;
                }
        }

        return 0;
}

static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
        struct bna *bna = rxf->rx->bna;
        int ret = 0;

        if (is_promisc_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask) ||
                (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
                /* Do nothing if pending enable or already enabled */
        } else if (is_promisc_disable(rxf->rxmode_pending,
                                        rxf->rxmode_pending_bitmask)) {
                /* Turn off pending disable command */
                promisc_inactive(rxf->rxmode_pending,
                        rxf->rxmode_pending_bitmask);
        } else {
                /* Schedule enable */
                promisc_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                bna->promisc_rid = rxf->rx->rid;
                ret = 1;
        }

        return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
        struct bna *bna = rxf->rx->bna;
        int ret = 0;

        if (is_promisc_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask) ||
                (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
                /* Do nothing if pending disable or already disabled */
        } else if (is_promisc_enable(rxf->rxmode_pending,
                                        rxf->rxmode_pending_bitmask)) {
                /* Turn off pending enable command */
                promisc_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                bna->promisc_rid = BFI_INVALID_RID;
        } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
                /* Schedule disable */
                promisc_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                ret = 1;
        }

        return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
        int ret = 0;

        if (is_allmulti_enable(rxf->rxmode_pending,
                        rxf->rxmode_pending_bitmask) ||
                        (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
                /* Do nothing if pending enable or already enabled */
        } else if (is_allmulti_disable(rxf->rxmode_pending,
                                        rxf->rxmode_pending_bitmask)) {
                /* Turn off pending disable command */
                allmulti_inactive(rxf->rxmode_pending,
                        rxf->rxmode_pending_bitmask);
        } else {
                /* Schedule enable */
                allmulti_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                ret = 1;
        }

        return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
        int ret = 0;

        if (is_allmulti_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask) ||
                (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
                /* Do nothing if pending disable or already disabled */
        } else if (is_allmulti_enable(rxf->rxmode_pending,
                                        rxf->rxmode_pending_bitmask)) {
                /* Turn off pending enable command */
                allmulti_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
        } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
                /* Schedule disable */
                allmulti_disable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                ret = 1;
        }

        return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
        if (rxf->vlan_strip_pending) {
                rxf->vlan_strip_pending = false;
                bna_bfi_vlan_strip_enable(rxf);
                return 1;
        }

        return 0;
}

/* RX */

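/*
 * A BNA_RXP_SINGLE path carries one RxQ; SLR and HDS paths carry a
 * large/header queue plus a small/data queue, hence num_paths * 2.
 */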
#define BNA_GET_RXQS(qcfg)      (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
        (qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)     (((size) >> PAGE_SHIFT) + ((((size) &\
        (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)                                           \
do {                                                                    \
        if ((rx)->stop_cbfn) {                                          \
                void (*cbfn)(void *, struct bna_rx *);                  \
                void *cbarg;                                            \
                cbfn = (rx)->stop_cbfn;                                 \
                cbarg = (rx)->stop_cbarg;                               \
                (rx)->stop_cbfn = NULL;                                 \
                (rx)->stop_cbarg = NULL;                                \
                cbfn(cbarg, rx);                                        \
        }                                                               \
} while (0)

#define call_rx_stall_cbfn(rx)                                          \
do {                                                                    \
        if ((rx)->rx_stall_cbfn)                                        \
                (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));             \
} while (0)

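/*
 * Load a BFI queue descriptor from a BNA queue page table: the page
 * table base and the first entry address are split into 32-bit halves,
 * and the page count/size are converted with htons() for the firmware.
 */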
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
do {                                                                    \
        struct bna_dma_addr cur_q_addr =                                \
                *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));      \
        (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;        \
        (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;        \
        (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;              \
        (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;              \
        (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);             \
        (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);            \
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

bfa_fsm_state_decl(bna_rx, stopped,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
        struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
        call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
                                enum bna_rx_event event)
{
        switch (event) {
        case RX_E_START:
                bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
                break;

        case RX_E_STOP:
                call_rx_stop_cbfn(rx);
                break;

        case RX_E_FAIL:
                /* no-op */
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
        bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
        switch (event) {
        case RX_E_FAIL:
        case RX_E_STOPPED:
                bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
                rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
                break;

        case RX_E_STARTED:
                bna_rx_enet_stop(rx);
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
                                enum bna_rx_event event)
{
        switch (event) {
        case RX_E_STOP:
                bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
                break;

        case RX_E_FAIL:
                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
                break;

        case RX_E_STARTED:
                bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
        rx->rx_post_cbfn(rx->bna->bnad, rx);
        bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
        switch (event) {
        case RX_E_FAIL:
                bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
                bna_rxf_fail(&rx->rxf);
                call_rx_stall_cbfn(rx);
                rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
                break;

        case RX_E_RXF_STARTED:
                bna_rxf_stop(&rx->rxf);
                break;

        case RX_E_RXF_STOPPED:
                bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
                call_rx_stall_cbfn(rx);
                bna_rx_enet_stop(rx);
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
        struct bna_rxp *rxp;
        struct list_head *qe_rxp;
        int is_regular = (rx->type == BNA_RX_T_REGULAR);

        /* Start IB */
        list_for_each(qe_rxp, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe_rxp;
                bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
        }

        bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
        switch (event) {
        case RX_E_STOP:
                bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
                bna_ethport_cb_rx_stopped(&rx->bna->ethport);
                bna_rxf_stop(&rx->rxf);
                break;

        case RX_E_FAIL:
                bfa_fsm_set_state(rx, bna_rx_sm_failed);
                bna_ethport_cb_rx_stopped(&rx->bna->ethport);
                bna_rxf_fail(&rx->rxf);
                call_rx_stall_cbfn(rx);
                rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
                                enum bna_rx_event event)
{
        switch (event) {
        case RX_E_STOP:
                bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
                break;

        case RX_E_FAIL:
                bfa_fsm_set_state(rx, bna_rx_sm_failed);
                bna_rxf_fail(&rx->rxf);
                call_rx_stall_cbfn(rx);
                rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
                break;

        case RX_E_RXF_STARTED:
                bfa_fsm_set_state(rx, bna_rx_sm_started);
                break;

        default:
                bfa_sm_fault(event);
                break;
        }
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1564{
1565}
1566
static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1569{
1570        switch (event) {
1571        case RX_E_FAIL:
1572        case RX_E_RXF_STOPPED:
1573                /* No-op */
1574                break;
1575
1576        case RX_E_CLEANUP_DONE:
1577                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1578                break;
1579
1580        default:
1581                bfa_sm_fault(event);
1582                break;
1583        }
1584}
1585
1586static void
1587bna_rx_sm_failed_entry(struct bna_rx *rx)
1588{
1589}
1590
1591static void
1592bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1593{
1594        switch (event) {
1595        case RX_E_START:
1596                bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1597                break;
1598
1599        case RX_E_STOP:
1600                bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1601                break;
1602
1603        case RX_E_FAIL:
1604        case RX_E_RXF_STARTED:
1605        case RX_E_RXF_STOPPED:
1606                /* No-op */
1607                break;
1608
1609        case RX_E_CLEANUP_DONE:
1610                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1611                break;
1612
1613        default:
1614                bfa_sm_fault(event);
1615                break;
        }
}
1617
1618static void
1619bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1620{
1621}
1622
1623static void
1624bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1625{
1626        switch (event) {
1627        case RX_E_STOP:
1628                bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1629                break;
1630
1631        case RX_E_FAIL:
1632                bfa_fsm_set_state(rx, bna_rx_sm_failed);
1633                break;
1634
1635        case RX_E_CLEANUP_DONE:
1636                bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1637                break;
1638
1639        default:
1640                bfa_sm_fault(event);
1641                break;
1642        }
1643}
1644
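/* Build a BFI_ENET_H2I_RX_CFG_SET_REQ describing every Rx path (CQ,
 * large/data RxQ, optional small/header RxQ and IB settings) and post
 * it to the firmware message queue.
 */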
1645static void
1646bna_bfi_rx_enet_start(struct bna_rx *rx)
1647{
1648        struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1649        struct bna_rxp *rxp = NULL;
1650        struct bna_rxq *q0 = NULL, *q1 = NULL;
1651        struct list_head *rxp_qe;
1652        int i;
1653
1654        bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1655                BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1656        cfg_req->mh.num_entries = htons(
1657                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1658
1659        cfg_req->num_queue_sets = rx->num_paths;
1660        for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1661                i < rx->num_paths;
1662                i++, rxp_qe = bfa_q_next(rxp_qe)) {
1663                rxp = (struct bna_rxp *)rxp_qe;
1664
1665                GET_RXQS(rxp, q0, q1);
1666                switch (rxp->type) {
1667                case BNA_RXP_SLR:
1668                case BNA_RXP_HDS:
1669                        /* Small RxQ */
1670                        bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1671                                                &q1->qpt);
1672                        cfg_req->q_cfg[i].qs.rx_buffer_size =
1673                                htons((u16)q1->buffer_size);
1674                        /* Fall through */
1675
1676                case BNA_RXP_SINGLE:
1677                        /* Large/Single RxQ */
1678                        bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1679                                                &q0->qpt);
1680                        q0->buffer_size =
1681                                bna_enet_mtu_get(&rx->bna->enet);
1682                        cfg_req->q_cfg[i].ql.rx_buffer_size =
1683                                htons((u16)q0->buffer_size);
1684                        break;
1685
1686                default:
1687                        BUG_ON(1);
1688                }
1689
1690                bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1691                                        &rxp->cq.qpt);
1692
1693                cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1694                        rxp->cq.ib.ib_seg_host_addr.lsb;
1695                cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1696                        rxp->cq.ib.ib_seg_host_addr.msb;
1697                cfg_req->q_cfg[i].ib.intr.msix_index =
1698                        htons((u16)rxp->cq.ib.intr_vector);
1699        }
1700
1701        cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1702        cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1703        cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1704        cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1705        cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1706                                ? BNA_STATUS_T_ENABLED :
1707                                BNA_STATUS_T_DISABLED;
1708        cfg_req->ib_cfg.coalescing_timeout =
1709                        htonl((u32)rxp->cq.ib.coalescing_timeo);
1710        cfg_req->ib_cfg.inter_pkt_timeout =
1711                        htonl((u32)rxp->cq.ib.interpkt_timeo);
1712        cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1713
1714        switch (rxp->type) {
1715        case BNA_RXP_SLR:
1716                cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1717                break;
1718
1719        case BNA_RXP_HDS:
1720                cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1721                cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1722                cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1723                cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1724                break;
1725
1726        case BNA_RXP_SINGLE:
1727                cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1728                break;
1729
1730        default:
1731                BUG_ON(1);
1732        }
1733        cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1734
1735        bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1736                sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1737        bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1738}
1739
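/* Post a BFI_ENET_H2I_RX_CFG_CLR_REQ to tear down the Rx configuration
 * in the firmware.
 */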
1740static void
1741bna_bfi_rx_enet_stop(struct bna_rx *rx)
1742{
1743        struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1744
1745        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1746                BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1747        req->mh.num_entries = htons(
1748                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1749        bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1750                &req->mh);
1751        bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1752}
1753
1754static void
1755bna_rx_enet_stop(struct bna_rx *rx)
1756{
1757        struct bna_rxp *rxp;
        struct list_head *qe_rxp;
1759
1760        /* Stop IB */
1761        list_for_each(qe_rxp, &rx->rxp_q) {
1762                rxp = (struct bna_rxp *)qe_rxp;
1763                bna_ib_stop(rx->bna, &rxp->cq.ib);
1764        }
1765
1766        bna_bfi_rx_enet_stop(rx);
1767}
1768
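/* Check that enough free Rx objects exist for this configuration:
 * BNA_RXP_SINGLE needs one RxQ per path, the other RxP types need two.
 */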
1769static int
1770bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1771{
1772        if ((rx_mod->rx_free_count == 0) ||
1773                (rx_mod->rxp_free_count == 0) ||
1774                (rx_mod->rxq_free_count == 0))
1775                return 0;
1776
1777        if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1778                if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1779                        (rx_mod->rxq_free_count < rx_cfg->num_paths))
1780                                return 0;
1781        } else {
1782                if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1783                        (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1784                        return 0;
1785        }
1786
1787        return 1;
1788}
1789
1790static struct bna_rxq *
1791bna_rxq_get(struct bna_rx_mod *rx_mod)
1792{
1793        struct bna_rxq *rxq = NULL;
1794        struct list_head        *qe = NULL;
1795
1796        bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1797        rx_mod->rxq_free_count--;
1798        rxq = (struct bna_rxq *)qe;
1799        bfa_q_qe_init(&rxq->qe);
1800
1801        return rxq;
1802}
1803
1804static void
1805bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1806{
1807        bfa_q_qe_init(&rxq->qe);
1808        list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1809        rx_mod->rxq_free_count++;
1810}
1811
1812static struct bna_rxp *
1813bna_rxp_get(struct bna_rx_mod *rx_mod)
1814{
1815        struct list_head        *qe = NULL;
1816        struct bna_rxp *rxp = NULL;
1817
1818        bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1819        rx_mod->rxp_free_count--;
1820        rxp = (struct bna_rxp *)qe;
1821        bfa_q_qe_init(&rxp->qe);
1822
1823        return rxp;
1824}
1825
1826static void
1827bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1828{
1829        bfa_q_qe_init(&rxp->qe);
1830        list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1831        rx_mod->rxp_free_count++;
1832}
1833
1834static struct bna_rx *
1835bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1836{
1837        struct list_head        *qe = NULL;
1838        struct bna_rx *rx = NULL;
1839
        if (type == BNA_RX_T_REGULAR)
                bfa_q_deq(&rx_mod->rx_free_q, &qe);
        else
                bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1844
1845        rx_mod->rx_free_count--;
1846        rx = (struct bna_rx *)qe;
1847        bfa_q_qe_init(&rx->qe);
1848        list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1849        rx->type = type;
1850
1851        return rx;
1852}
1853
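/* Return an Rx object to the free list, keeping the list sorted by
 * rid; regular Rx allocates from the head, loopback from the tail.
 */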
1854static void
1855bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1856{
1857        struct list_head *prev_qe = NULL;
1858        struct list_head *qe;
1859
1860        bfa_q_qe_init(&rx->qe);
1861
1862        list_for_each(qe, &rx_mod->rx_free_q) {
1863                if (((struct bna_rx *)qe)->rid < rx->rid)
1864                        prev_qe = qe;
1865                else
1866                        break;
1867        }
1868
1869        if (prev_qe == NULL) {
1870                /* This is the first entry */
1871                bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1872        } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1873                /* This is the last entry */
1874                list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1875        } else {
1876                /* Somewhere in the middle */
1877                bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1878                bfa_q_prev(&rx->qe) = prev_qe;
1879                bfa_q_next(prev_qe) = &rx->qe;
1880                bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1881        }
1882
1883        rx_mod->rx_free_count++;
1884}
1885
1886static void
1887bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1888                struct bna_rxq *q1)
1889{
1890        switch (rxp->type) {
1891        case BNA_RXP_SINGLE:
1892                rxp->rxq.single.only = q0;
1893                rxp->rxq.single.reserved = NULL;
1894                break;
1895        case BNA_RXP_SLR:
1896                rxp->rxq.slr.large = q0;
1897                rxp->rxq.slr.small = q1;
1898                break;
1899        case BNA_RXP_HDS:
1900                rxp->rxq.hds.data = q0;
1901                rxp->rxq.hds.hdr = q1;
1902                break;
1903        default:
1904                break;
1905        }
1906}
1907
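/* Program an RxQ queue page table (QPT): store the DMA address of each
 * queue page in the hardware QPT and its kernel virtual address in the
 * software QPT shadow.
 */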
1908static void
1909bna_rxq_qpt_setup(struct bna_rxq *rxq,
1910                struct bna_rxp *rxp,
1911                u32 page_count,
1912                u32 page_size,
1913                struct bna_mem_descr *qpt_mem,
1914                struct bna_mem_descr *swqpt_mem,
1915                struct bna_mem_descr *page_mem)
1916{
1917        int     i;
1918
1919        rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1920        rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1921        rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1922        rxq->qpt.page_count = page_count;
1923        rxq->qpt.page_size = page_size;
1924
1925        rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1926
1927        for (i = 0; i < rxq->qpt.page_count; i++) {
1928                rxq->rcb->sw_qpt[i] = page_mem[i].kva;
1929                ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1930                        page_mem[i].dma.lsb;
1931                ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1932                        page_mem[i].dma.msb;
1933        }
1934}
1935
1936static void
1937bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1938                u32 page_count,
1939                u32 page_size,
1940                struct bna_mem_descr *qpt_mem,
1941                struct bna_mem_descr *swqpt_mem,
1942                struct bna_mem_descr *page_mem)
1943{
1944        int     i;
1945
1946        rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1947        rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1948        rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1949        rxp->cq.qpt.page_count = page_count;
1950        rxp->cq.qpt.page_size = page_size;
1951
1952        rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1953
1954        for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1955                rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
1956
1957                ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1958                        page_mem[i].dma.lsb;
1959                ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1960                        page_mem[i].dma.msb;
1961        }
1962}
1963
1964static void
1965bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
1966{
1967        struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1968
1969        bfa_wc_down(&rx_mod->rx_stop_wc);
1970}
1971
1972static void
1973bna_rx_mod_cb_rx_stopped_all(void *arg)
1974{
1975        struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1976
1977        if (rx_mod->stop_cbfn)
1978                rx_mod->stop_cbfn(&rx_mod->bna->enet);
1979        rx_mod->stop_cbfn = NULL;
1980}
1981
1982static void
1983bna_rx_start(struct bna_rx *rx)
1984{
1985        rx->rx_flags |= BNA_RX_F_ENET_STARTED;
1986        if (rx->rx_flags & BNA_RX_F_ENABLED)
1987                bfa_fsm_send_event(rx, RX_E_START);
1988}
1989
1990static void
1991bna_rx_stop(struct bna_rx *rx)
1992{
1993        rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
        if (rx->fsm == (bfa_fsm_t)bna_rx_sm_stopped) {
                bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
        } else {
                rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
                rx->stop_cbarg = &rx->bna->rx_mod;
                bfa_fsm_send_event(rx, RX_E_STOP);
        }
2001}
2002
2003static void
2004bna_rx_fail(struct bna_rx *rx)
2005{
        /* Indicate that Enet is no longer started and has failed */
2007        rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2008        bfa_fsm_send_event(rx, RX_E_FAIL);
2009}
2010
2011void
2012bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2013{
2014        struct bna_rx *rx;
2015        struct list_head *qe;
2016
2017        rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2018        if (type == BNA_RX_T_LOOPBACK)
2019                rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2020
2021        list_for_each(qe, &rx_mod->rx_active_q) {
2022                rx = (struct bna_rx *)qe;
2023                if (rx->type == type)
2024                        bna_rx_start(rx);
2025        }
2026}
2027
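/* Stop all active Rx objects of the given type. A wait counter tracks
 * the pending stops and invokes bna_rx_mod_cb_rx_stopped_all() once the
 * last Rx has stopped.
 */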
2028void
2029bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2030{
2031        struct bna_rx *rx;
2032        struct list_head *qe;
2033
2034        rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2035        rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2036
2037        rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2038
2039        bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2040
2041        list_for_each(qe, &rx_mod->rx_active_q) {
2042                rx = (struct bna_rx *)qe;
2043                if (rx->type == type) {
2044                        bfa_wc_up(&rx_mod->rx_stop_wc);
2045                        bna_rx_stop(rx);
2046                }
2047        }
2048
2049        bfa_wc_wait(&rx_mod->rx_stop_wc);
2050}
2051
2052void
2053bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2054{
2055        struct bna_rx *rx;
2056        struct list_head *qe;
2057
2058        rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2059        rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2060
2061        list_for_each(qe, &rx_mod->rx_active_q) {
2062                rx = (struct bna_rx *)qe;
2063                bna_rx_fail(rx);
2064        }
2065}
2066
2067void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2068                        struct bna_res_info *res_info)
2069{
2070        int     index;
2071        struct bna_rx *rx_ptr;
2072        struct bna_rxp *rxp_ptr;
2073        struct bna_rxq *rxq_ptr;
2074
2075        rx_mod->bna = bna;
2076        rx_mod->flags = 0;
2077
2078        rx_mod->rx = (struct bna_rx *)
2079                res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2080        rx_mod->rxp = (struct bna_rxp *)
2081                res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2082        rx_mod->rxq = (struct bna_rxq *)
2083                res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2084
2085        /* Initialize the queues */
2086        INIT_LIST_HEAD(&rx_mod->rx_free_q);
2087        rx_mod->rx_free_count = 0;
2088        INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2089        rx_mod->rxq_free_count = 0;
2090        INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2091        rx_mod->rxp_free_count = 0;
2092        INIT_LIST_HEAD(&rx_mod->rx_active_q);
2093
2094        /* Build RX queues */
2095        for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2096                rx_ptr = &rx_mod->rx[index];
2097
2098                bfa_q_qe_init(&rx_ptr->qe);
2099                INIT_LIST_HEAD(&rx_ptr->rxp_q);
2100                rx_ptr->bna = NULL;
2101                rx_ptr->rid = index;
2102                rx_ptr->stop_cbfn = NULL;
2103                rx_ptr->stop_cbarg = NULL;
2104
2105                list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2106                rx_mod->rx_free_count++;
2107        }
2108
        /* Build RX-path queue */
2110        for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2111                rxp_ptr = &rx_mod->rxp[index];
2112                bfa_q_qe_init(&rxp_ptr->qe);
2113                list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2114                rx_mod->rxp_free_count++;
2115        }
2116
        /* Build RXQ queue */
2118        for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2119                rxq_ptr = &rx_mod->rxq[index];
2120                bfa_q_qe_init(&rxq_ptr->qe);
2121                list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2122                rx_mod->rxq_free_count++;
2123        }
2124}
2125
2126void
2127bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2128{
        struct list_head *qe;
2130        int i;
2131
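        /* Sanity traversal of the free lists; the counts accumulated
         * in i are not used beyond these loops.
         */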
2132        i = 0;
2133        list_for_each(qe, &rx_mod->rx_free_q)
2134                i++;
2135
2136        i = 0;
2137        list_for_each(qe, &rx_mod->rxp_free_q)
2138                i++;
2139
2140        i = 0;
2141        list_for_each(qe, &rx_mod->rxq_free_q)
2142                i++;
2143
2144        rx_mod->bna = NULL;
2145}
2146
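/* Handle the firmware response to the Rx configure request: record the
 * hardware queue ids, set up the doorbell addresses, reset the
 * producer/consumer indexes and drive the FSM with RX_E_STARTED.
 */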
2147void
2148bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2149{
2150        struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2151        struct bna_rxp *rxp = NULL;
2152        struct bna_rxq *q0 = NULL, *q1 = NULL;
2153        struct list_head *rxp_qe;
2154        int i;
2155
2156        bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2157                sizeof(struct bfi_enet_rx_cfg_rsp));
2158
2159        rx->hw_id = cfg_rsp->hw_id;
2160
2161        for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2162                i < rx->num_paths;
2163                i++, rxp_qe = bfa_q_next(rxp_qe)) {
2164                rxp = (struct bna_rxp *)rxp_qe;
2165                GET_RXQS(rxp, q0, q1);
2166
2167                /* Setup doorbells */
2168                rxp->cq.ccb->i_dbell->doorbell_addr =
2169                        rx->bna->pcidev.pci_bar_kva
2170                        + ntohl(cfg_rsp->q_handles[i].i_dbell);
2171                rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2172                q0->rcb->q_dbell =
2173                        rx->bna->pcidev.pci_bar_kva
2174                        + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2175                q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2176                if (q1) {
2177                        q1->rcb->q_dbell =
2178                        rx->bna->pcidev.pci_bar_kva
2179                        + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2180                        q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2181                }
2182
2183                /* Initialize producer/consumer indexes */
2184                (*rxp->cq.ccb->hw_producer_index) = 0;
2185                rxp->cq.ccb->producer_index = 0;
2186                q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2187                if (q1)
2188                        q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2189        }
2190
2191        bfa_fsm_send_event(rx, RX_E_STARTED);
2192}
2193
2194void
2195bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2196{
2197        bfa_fsm_send_event(rx, RX_E_STOPPED);
2198}
2199
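/* Fill in res_info with the resources an Rx of this configuration
 * needs: CCB/RCB control blocks, completion and receive queue page
 * tables plus their pages, IB index segments, the RSS indirection
 * table, and one MSI-X vector per path.
 */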
2200void
2201bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2202{
2203        u32 cq_size, hq_size, dq_size;
2204        u32 cpage_count, hpage_count, dpage_count;
2205        struct bna_mem_info *mem_info;
2206        u32 cq_depth;
2207        u32 hq_depth;
2208        u32 dq_depth;
2209
2210        dq_depth = q_cfg->q_depth;
2211        hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2212        cq_depth = dq_depth + hq_depth;
2213
2214        BNA_TO_POWER_OF_2_HIGH(cq_depth);
2215        cq_size = cq_depth * BFI_CQ_WI_SIZE;
2216        cq_size = ALIGN(cq_size, PAGE_SIZE);
2217        cpage_count = SIZE_TO_PAGES(cq_size);
2218
2219        BNA_TO_POWER_OF_2_HIGH(dq_depth);
2220        dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2221        dq_size = ALIGN(dq_size, PAGE_SIZE);
2222        dpage_count = SIZE_TO_PAGES(dq_size);
2223
        if (q_cfg->rxp_type != BNA_RXP_SINGLE) {
                BNA_TO_POWER_OF_2_HIGH(hq_depth);
                hq_size = hq_depth * BFI_RXQ_WI_SIZE;
                hq_size = ALIGN(hq_size, PAGE_SIZE);
                hpage_count = SIZE_TO_PAGES(hq_size);
        } else {
                hpage_count = 0;
        }
2231
2232        res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2233        mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2234        mem_info->mem_type = BNA_MEM_T_KVA;
2235        mem_info->len = sizeof(struct bna_ccb);
2236        mem_info->num = q_cfg->num_paths;
2237
2238        res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2239        mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2240        mem_info->mem_type = BNA_MEM_T_KVA;
2241        mem_info->len = sizeof(struct bna_rcb);
2242        mem_info->num = BNA_GET_RXQS(q_cfg);
2243
2244        res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2245        mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2246        mem_info->mem_type = BNA_MEM_T_DMA;
2247        mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2248        mem_info->num = q_cfg->num_paths;
2249
2250        res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2251        mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2252        mem_info->mem_type = BNA_MEM_T_KVA;
2253        mem_info->len = cpage_count * sizeof(void *);
2254        mem_info->num = q_cfg->num_paths;
2255
2256        res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2257        mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2258        mem_info->mem_type = BNA_MEM_T_DMA;
2259        mem_info->len = PAGE_SIZE;
2260        mem_info->num = cpage_count * q_cfg->num_paths;
2261
2262        res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2263        mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2264        mem_info->mem_type = BNA_MEM_T_DMA;
2265        mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2266        mem_info->num = q_cfg->num_paths;
2267
2268        res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2269        mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2270        mem_info->mem_type = BNA_MEM_T_KVA;
2271        mem_info->len = dpage_count * sizeof(void *);
2272        mem_info->num = q_cfg->num_paths;
2273
2274        res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2275        mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2276        mem_info->mem_type = BNA_MEM_T_DMA;
2277        mem_info->len = PAGE_SIZE;
2278        mem_info->num = dpage_count * q_cfg->num_paths;
2279
2280        res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2281        mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2282        mem_info->mem_type = BNA_MEM_T_DMA;
2283        mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2284        mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2285
2286        res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2287        mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2288        mem_info->mem_type = BNA_MEM_T_KVA;
2289        mem_info->len = hpage_count * sizeof(void *);
2290        mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2291
2292        res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2293        mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2294        mem_info->mem_type = BNA_MEM_T_DMA;
2295        mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2296        mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2297
2298        res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2299        mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2300        mem_info->mem_type = BNA_MEM_T_DMA;
2301        mem_info->len = BFI_IBIDX_SIZE;
2302        mem_info->num = q_cfg->num_paths;
2303
2304        res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2305        mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2306        mem_info->mem_type = BNA_MEM_T_KVA;
2307        mem_info->len = BFI_ENET_RSS_RIT_MAX;
2308        mem_info->num = 1;
2309
2310        res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2311        res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2312        res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2313}
2314
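/* Assemble an Rx object from the pre-allocated resources in res_info:
 * one RxP (CQ + IB) per path with one or two RxQs depending on the RxP
 * type, then initialize the RxF and put the FSM in the stopped state.
 */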
2315struct bna_rx *
2316bna_rx_create(struct bna *bna, struct bnad *bnad,
2317                struct bna_rx_config *rx_cfg,
2318                const struct bna_rx_event_cbfn *rx_cbfn,
2319                struct bna_res_info *res_info,
2320                void *priv)
2321{
2322        struct bna_rx_mod *rx_mod = &bna->rx_mod;
2323        struct bna_rx *rx;
2324        struct bna_rxp *rxp;
2325        struct bna_rxq *q0;
2326        struct bna_rxq *q1;
2327        struct bna_intr_info *intr_info;
2328        u32 page_count;
2329        struct bna_mem_descr *ccb_mem;
2330        struct bna_mem_descr *rcb_mem;
2331        struct bna_mem_descr *unmapq_mem;
2332        struct bna_mem_descr *cqpt_mem;
2333        struct bna_mem_descr *cswqpt_mem;
2334        struct bna_mem_descr *cpage_mem;
2335        struct bna_mem_descr *hqpt_mem;
2336        struct bna_mem_descr *dqpt_mem;
2337        struct bna_mem_descr *hsqpt_mem;
2338        struct bna_mem_descr *dsqpt_mem;
2339        struct bna_mem_descr *hpage_mem;
2340        struct bna_mem_descr *dpage_mem;
2341        int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
2342        int dpage_count, hpage_count, rcb_idx;
2343
2344        if (!bna_rx_res_check(rx_mod, rx_cfg))
2345                return NULL;
2346
2347        intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2348        ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2349        rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2350        unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2351        cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2352        cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2353        cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2354        hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2355        dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2356        hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2357        dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2358        hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2359        dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2360
2361        page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2362                        rx_cfg->num_paths;
2363
2364        dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2365                        rx_cfg->num_paths;
2366
2367        hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2368                        rx_cfg->num_paths;
2369
2370        rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2371        rx->bna = bna;
2372        rx->rx_flags = 0;
2373        INIT_LIST_HEAD(&rx->rxp_q);
2374        rx->stop_cbfn = NULL;
2375        rx->stop_cbarg = NULL;
2376        rx->priv = priv;
2377
2378        rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2379        rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2380        rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2381        rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2382        rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2383        /* Following callbacks are mandatory */
2384        rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2385        rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2386
2387        if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2388                switch (rx->type) {
2389                case BNA_RX_T_REGULAR:
2390                        if (!(rx->bna->rx_mod.flags &
2391                                BNA_RX_MOD_F_ENET_LOOPBACK))
2392                                rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2393                        break;
2394                case BNA_RX_T_LOOPBACK:
2395                        if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2396                                rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2397                        break;
2398                }
2399        }
2400
2401        rx->num_paths = rx_cfg->num_paths;
2402        for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
2403                rxp = bna_rxp_get(rx_mod);
2404                list_add_tail(&rxp->qe, &rx->rxp_q);
2405                rxp->type = rx_cfg->rxp_type;
2406                rxp->rx = rx;
2407                rxp->cq.rx = rx;
2408
2409                q0 = bna_rxq_get(rx_mod);
                if (rx_cfg->rxp_type == BNA_RXP_SINGLE)
2411                        q1 = NULL;
2412                else
2413                        q1 = bna_rxq_get(rx_mod);
2414
                if (intr_info->num == 1)
2416                        rxp->vector = intr_info->idl[0].vector;
2417                else
2418                        rxp->vector = intr_info->idl[i].vector;
2419
2420                /* Setup IB */
2421
2422                rxp->cq.ib.ib_seg_host_addr.lsb =
2423                res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2424                rxp->cq.ib.ib_seg_host_addr.msb =
2425                res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2426                rxp->cq.ib.ib_seg_host_addr_kva =
2427                res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2428                rxp->cq.ib.intr_type = intr_info->intr_type;
2429                if (intr_info->intr_type == BNA_INTR_T_MSIX)
2430                        rxp->cq.ib.intr_vector = rxp->vector;
2431                else
2432                        rxp->cq.ib.intr_vector = (1 << rxp->vector);
2433                rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2434                rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2435                rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2436
2437                bna_rxp_add_rxqs(rxp, q0, q1);
2438
2439                /* Setup large Q */
2440
2441                q0->rx = rx;
2442                q0->rxp = rxp;
2443
2444                q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2445                q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2446                rcb_idx++;
2447                q0->rcb->q_depth = rx_cfg->q_depth;
2448                q0->rcb->rxq = q0;
2449                q0->rcb->bnad = bna->bnad;
2450                q0->rcb->id = 0;
2451                q0->rx_packets = q0->rx_bytes = 0;
2452                q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2453
2454                bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2455                        &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2456                q0->rcb->page_idx = dpage_idx;
2457                q0->rcb->page_count = dpage_count;
2458                dpage_idx += dpage_count;
2459
2460                if (rx->rcb_setup_cbfn)
2461                        rx->rcb_setup_cbfn(bnad, q0->rcb);
2462
2463                /* Setup small Q */
2464
2465                if (q1) {
2466                        q1->rx = rx;
2467                        q1->rxp = rxp;
2468
2469                        q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2470                        q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2471                        rcb_idx++;
2472                        q1->rcb->q_depth = rx_cfg->q_depth;
2473                        q1->rcb->rxq = q1;
2474                        q1->rcb->bnad = bna->bnad;
2475                        q1->rcb->id = 1;
2476                        q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2477                                        rx_cfg->hds_config.forced_offset
2478                                        : rx_cfg->small_buff_size;
2479                        q1->rx_packets = q1->rx_bytes = 0;
2480                        q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2481
2482                        bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2483                                &hqpt_mem[i], &hsqpt_mem[i],
2484                                &hpage_mem[hpage_idx]);
2485                        q1->rcb->page_idx = hpage_idx;
2486                        q1->rcb->page_count = hpage_count;
2487                        hpage_idx += hpage_count;
2488
2489                        if (rx->rcb_setup_cbfn)
2490                                rx->rcb_setup_cbfn(bnad, q1->rcb);
2491                }
2492
2493                /* Setup CQ */
2494
2495                rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
                rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2497                                        ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2498                                        0 : rx_cfg->q_depth);
2499                rxp->cq.ccb->cq = &rxp->cq;
2500                rxp->cq.ccb->rcb[0] = q0->rcb;
2501                q0->rcb->ccb = rxp->cq.ccb;
2502                if (q1) {
2503                        rxp->cq.ccb->rcb[1] = q1->rcb;
2504                        q1->rcb->ccb = rxp->cq.ccb;
2505                }
2506                rxp->cq.ccb->hw_producer_index =
2507                        (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2508                rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2509                rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2510                rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2511                rxp->cq.ccb->rx_coalescing_timeo =
2512                        rxp->cq.ib.coalescing_timeo;
2513                rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2514                rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2515                rxp->cq.ccb->bnad = bna->bnad;
2516                rxp->cq.ccb->id = i;
2517
2518                bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2519                        &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2520                rxp->cq.ccb->page_idx = cpage_idx;
2521                rxp->cq.ccb->page_count = page_count;
2522                cpage_idx += page_count;
2523
2524                if (rx->ccb_setup_cbfn)
2525                        rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2526        }
2527
2528        rx->hds_cfg = rx_cfg->hds_config;
2529
2530        bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2531
2532        bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2533
2534        rx_mod->rid_mask |= (1 << rx->rid);
2535
2536        return rx;
2537}
2538
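/* Undo bna_rx_create(): uninitialize the RxF, release every RxQ and
 * RxP back to the Rx module, and return the Rx to the free list.
 */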
2539void
2540bna_rx_destroy(struct bna_rx *rx)
2541{
2542        struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2543        struct bna_rxq *q0 = NULL;
2544        struct bna_rxq *q1 = NULL;
2545        struct bna_rxp *rxp;
2546        struct list_head *qe;
2547
2548        bna_rxf_uninit(&rx->rxf);
2549
2550        while (!list_empty(&rx->rxp_q)) {
2551                bfa_q_deq(&rx->rxp_q, &rxp);
2552                GET_RXQS(rxp, q0, q1);
2553                if (rx->rcb_destroy_cbfn)
2554                        rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2555                q0->rcb = NULL;
2556                q0->rxp = NULL;
2557                q0->rx = NULL;
2558                bna_rxq_put(rx_mod, q0);
2559
2560                if (q1) {
2561                        if (rx->rcb_destroy_cbfn)
2562                                rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2563                        q1->rcb = NULL;
2564                        q1->rxp = NULL;
2565                        q1->rx = NULL;
2566                        bna_rxq_put(rx_mod, q1);
2567                }
2568                rxp->rxq.slr.large = NULL;
2569                rxp->rxq.slr.small = NULL;
2570
2571                if (rx->ccb_destroy_cbfn)
2572                        rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2573                rxp->cq.ccb = NULL;
2574                rxp->rx = NULL;
2575                bna_rxp_put(rx_mod, rxp);
2576        }
2577
2578        list_for_each(qe, &rx_mod->rx_active_q) {
2579                if (qe == &rx->qe) {
2580                        list_del(&rx->qe);
2581                        bfa_q_qe_init(&rx->qe);
2582                        break;
2583                }
2584        }
2585
2586        rx_mod->rid_mask &= ~(1 << rx->rid);
2587
2588        rx->bna = NULL;
2589        rx->priv = NULL;
2590        bna_rx_put(rx_mod, rx);
2591}
2592
2593void
2594bna_rx_enable(struct bna_rx *rx)
2595{
2596        if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2597                return;
2598
2599        rx->rx_flags |= BNA_RX_F_ENABLED;
2600        if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2601                bfa_fsm_send_event(rx, RX_E_START);
2602}
2603
2604void
2605bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2606                void (*cbfn)(void *, struct bna_rx *))
2607{
2608        if (type == BNA_SOFT_CLEANUP) {
                /* h/w should not be accessed. Treat it as if stopped */
2610                (*cbfn)(rx->bna->bnad, rx);
2611        } else {
2612                rx->stop_cbfn = cbfn;
2613                rx->stop_cbarg = rx->bna->bnad;
2614
2615                rx->rx_flags &= ~BNA_RX_F_ENABLED;
2616
2617                bfa_fsm_send_event(rx, RX_E_STOP);
2618        }
2619}
2620
2621void
2622bna_rx_cleanup_complete(struct bna_rx *rx)
2623{
2624        bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2625}
2626
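/* Apply promiscuous/all-multicast mode changes. Promiscuous and
 * default modes are mutually exclusive system-wide, so conflicting
 * requests fail; the hardware is reconfigured only if a mode actually
 * changed.
 */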
2627enum bna_cb_status
2628bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2629                enum bna_rxmode bitmask,
2630                void (*cbfn)(struct bnad *, struct bna_rx *))
2631{
2632        struct bna_rxf *rxf = &rx->rxf;
2633        int need_hw_config = 0;
2634
2635        /* Error checks */
2636
2637        if (is_promisc_enable(new_mode, bitmask)) {
2638                /* If promisc mode is already enabled elsewhere in the system */
2639                if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2640                        (rx->bna->promisc_rid != rxf->rx->rid))
2641                        goto err_return;
2642
2643                /* If default mode is already enabled in the system */
2644                if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2645                        goto err_return;
2646
2647                /* Trying to enable promiscuous and default mode together */
2648                if (is_default_enable(new_mode, bitmask))
2649                        goto err_return;
2650        }
2651
2652        if (is_default_enable(new_mode, bitmask)) {
2653                /* If default mode is already enabled elsewhere in the system */
                if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
                        (rx->bna->default_mode_rid != rxf->rx->rid))
                        goto err_return;
2658
2659                /* If promiscuous mode is already enabled in the system */
2660                if (rx->bna->promisc_rid != BFI_INVALID_RID)
2661                        goto err_return;
2662        }
2663
2664        /* Process the commands */
2665
2666        if (is_promisc_enable(new_mode, bitmask)) {
2667                if (bna_rxf_promisc_enable(rxf))
2668                        need_hw_config = 1;
2669        } else if (is_promisc_disable(new_mode, bitmask)) {
2670                if (bna_rxf_promisc_disable(rxf))
2671                        need_hw_config = 1;
2672        }
2673
2674        if (is_allmulti_enable(new_mode, bitmask)) {
2675                if (bna_rxf_allmulti_enable(rxf))
2676                        need_hw_config = 1;
2677        } else if (is_allmulti_disable(new_mode, bitmask)) {
2678                if (bna_rxf_allmulti_disable(rxf))
2679                        need_hw_config = 1;
2680        }
2681
2682        /* Trigger h/w if needed */
2683
2684        if (need_hw_config) {
2685                rxf->cam_fltr_cbfn = cbfn;
2686                rxf->cam_fltr_cbarg = rx->bna->bnad;
2687                bfa_fsm_send_event(rxf, RXF_E_CONFIG);
        } else if (cbfn) {
                (*cbfn)(rx->bna->bnad, rx);
        }
2690
2691        return BNA_CB_SUCCESS;
2692
2693err_return:
2694        return BNA_CB_FAIL;
2695}
2696
2697void
2698bna_rx_vlanfilter_enable(struct bna_rx *rx)
2699{
2700        struct bna_rxf *rxf = &rx->rxf;
2701
2702        if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2703                rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2704                rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2705                bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2706        }
2707}
2708
2709void
2710bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2711{
2712        struct bna_rxp *rxp;
2713        struct list_head *qe;
2714
2715        list_for_each(qe, &rx->rxp_q) {
2716                rxp = (struct bna_rxp *)qe;
2717                rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2718                bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2719        }
2720}
2721
2722void
2723bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2724{
2725        int i, j;
2726
2727        for (i = 0; i < BNA_LOAD_T_MAX; i++)
2728                for (j = 0; j < BNA_BIAS_T_MAX; j++)
2729                        bna->rx_mod.dim_vector[i][j] = vector[i][j];
2730}
2731
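/* Dynamic interrupt moderation: classify the sampled packet rate into
 * a load level and a small/large packet bias, then program the
 * corresponding coalescing timeout from the DIM vector into the IB.
 */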
2732void
2733bna_rx_dim_update(struct bna_ccb *ccb)
2734{
2735        struct bna *bna = ccb->cq->rx->bna;
2736        u32 load, bias;
2737        u32 pkt_rt, small_rt, large_rt;
2738        u8 coalescing_timeo;
2739
2740        if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2741                (ccb->pkt_rate.large_pkt_cnt == 0))
2742                return;
2743
        /* Pick the preconfigured coalescing timeout based on pkt rate */
2745
2746        small_rt = ccb->pkt_rate.small_pkt_cnt;
2747        large_rt = ccb->pkt_rate.large_pkt_cnt;
2748
2749        pkt_rt = small_rt + large_rt;
2750
2751        if (pkt_rt < BNA_PKT_RATE_10K)
2752                load = BNA_LOAD_T_LOW_4;
2753        else if (pkt_rt < BNA_PKT_RATE_20K)
2754                load = BNA_LOAD_T_LOW_3;
2755        else if (pkt_rt < BNA_PKT_RATE_30K)
2756                load = BNA_LOAD_T_LOW_2;
2757        else if (pkt_rt < BNA_PKT_RATE_40K)
2758                load = BNA_LOAD_T_LOW_1;
2759        else if (pkt_rt < BNA_PKT_RATE_50K)
2760                load = BNA_LOAD_T_HIGH_1;
2761        else if (pkt_rt < BNA_PKT_RATE_60K)
2762                load = BNA_LOAD_T_HIGH_2;
2763        else if (pkt_rt < BNA_PKT_RATE_80K)
2764                load = BNA_LOAD_T_HIGH_3;
2765        else
2766                load = BNA_LOAD_T_HIGH_4;
2767
2768        if (small_rt > (large_rt << 1))
2769                bias = 0;
2770        else
2771                bias = 1;
2772
2773        ccb->pkt_rate.small_pkt_cnt = 0;
2774        ccb->pkt_rate.large_pkt_cnt = 0;
2775
2776        coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2777        ccb->rx_coalescing_timeo = coalescing_timeo;
2778
2779        /* Set it to IB */
2780        bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2781}
2782
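/* Default DIM coalescing timeouts, indexed as [load][bias]. */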
2783const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2784        {12, 12},
2785        {6, 10},
2786        {5, 10},
2787        {4, 8},
2788        {3, 6},
2789        {3, 6},
2790        {2, 4},
2791        {1, 2},
2792};
2793
2794/**
2795 * TX
2796 */
2797#define call_tx_stop_cbfn(tx)                                           \
2798do {                                                                    \
2799        if ((tx)->stop_cbfn) {                                          \
2800                void (*cbfn)(void *, struct bna_tx *);          \
2801                void *cbarg;                                            \
2802                cbfn = (tx)->stop_cbfn;                                 \
2803                cbarg = (tx)->stop_cbarg;                               \
2804                (tx)->stop_cbfn = NULL;                                 \
2805                (tx)->stop_cbarg = NULL;                                \
2806                cbfn(cbarg, (tx));                                      \
2807        }                                                               \
2808} while (0)
2809
2810#define call_tx_prio_change_cbfn(tx)                                    \
2811do {                                                                    \
2812        if ((tx)->prio_change_cbfn) {                                   \
2813                void (*cbfn)(struct bnad *, struct bna_tx *);   \
2814                cbfn = (tx)->prio_change_cbfn;                          \
2815                (tx)->prio_change_cbfn = NULL;                          \
2816                cbfn((tx)->bna->bnad, (tx));                            \
2817        }                                                               \
2818} while (0)
2819
2820static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2821static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2822static void bna_tx_enet_stop(struct bna_tx *tx);
2823
2824enum bna_tx_event {
2825        TX_E_START                      = 1,
2826        TX_E_STOP                       = 2,
2827        TX_E_FAIL                       = 3,
2828        TX_E_STARTED                    = 4,
2829        TX_E_STOPPED                    = 5,
2830        TX_E_PRIO_CHANGE                = 6,
2831        TX_E_CLEANUP_DONE               = 7,
2832        TX_E_BW_UPDATE                  = 8,
2833};
2834
2835bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2836bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2837bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2838bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2839bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2840                        enum bna_tx_event);
2841bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2842                        enum bna_tx_event);
2843bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2844                        enum bna_tx_event);
2845bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2846bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2847                        enum bna_tx_event);
2848
2849static void
2850bna_tx_sm_stopped_entry(struct bna_tx *tx)
2851{
2852        call_tx_stop_cbfn(tx);
2853}
2854
2855static void
2856bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2857{
2858        switch (event) {
2859        case TX_E_START:
2860                bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2861                break;
2862
2863        case TX_E_STOP:
2864                call_tx_stop_cbfn(tx);
2865                break;
2866
2867        case TX_E_FAIL:
2868                /* No-op */
2869                break;
2870
2871        case TX_E_PRIO_CHANGE:
2872                call_tx_prio_change_cbfn(tx);
2873                break;
2874
2875        case TX_E_BW_UPDATE:
2876                /* No-op */
2877                break;
2878
2879        default:
2880                bfa_sm_fault(event);
2881        }
2882}
2883
2884static void
2885bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2886{
2887        bna_bfi_tx_enet_start(tx);
2888}
2889
2890static void
2891bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2892{
2893        switch (event) {
2894        case TX_E_STOP:
2895                tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2896                bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2897                break;
2898
2899        case TX_E_FAIL:
2900                tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2901                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2902                break;
2903
2904        case TX_E_STARTED:
2905                if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
2906                        tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
2907                                BNA_TX_F_BW_UPDATED);
2908                        bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
                } else {
                        bfa_fsm_set_state(tx, bna_tx_sm_started);
                }
2911                break;
2912
2913        case TX_E_PRIO_CHANGE:
                tx->flags |= BNA_TX_F_PRIO_CHANGED;
2915                break;
2916
2917        case TX_E_BW_UPDATE:
2918                tx->flags |= BNA_TX_F_BW_UPDATED;
2919                break;
2920
2921        default:
2922                bfa_sm_fault(event);
2923        }
2924}
2925
2926static void
2927bna_tx_sm_started_entry(struct bna_tx *tx)
2928{
2929        struct bna_txq *txq;
        struct list_head *qe;
2931        int is_regular = (tx->type == BNA_TX_T_REGULAR);
2932
2933        list_for_each(qe, &tx->txq_q) {
2934                txq = (struct bna_txq *)qe;
2935                txq->tcb->priority = txq->priority;
2936                /* Start IB */
2937                bna_ib_start(tx->bna, &txq->ib, is_regular);
2938        }
2939        tx->tx_resume_cbfn(tx->bna->bnad, tx);
2940}
2941
2942static void
2943bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2944{
2945        switch (event) {
2946        case TX_E_STOP:
2947                bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2948                tx->tx_stall_cbfn(tx->bna->bnad, tx);
2949                bna_tx_enet_stop(tx);
2950                break;
2951
2952        case TX_E_FAIL:
2953                bfa_fsm_set_state(tx, bna_tx_sm_failed);
2954                tx->tx_stall_cbfn(tx->bna->bnad, tx);
2955                tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2956                break;
2957
2958        case TX_E_PRIO_CHANGE:
2959        case TX_E_BW_UPDATE:
2960                bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2961                break;
2962
2963        default:
2964                bfa_sm_fault(event);
2965        }
2966}
2967
2968static void
2969bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
2970{
2971}
2972
2973static void
2974bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2975{
2976        switch (event) {
2977        case TX_E_FAIL:
2978        case TX_E_STOPPED:
2979                bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
2980                tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2981                break;
2982
2983        case TX_E_STARTED:
2984                /**
2985                 * We are here due to start_wait -> stop_wait transition on
2986                 * TX_E_STOP event
2987                 */
2988                bna_tx_enet_stop(tx);
2989                break;
2990
2991        case TX_E_PRIO_CHANGE:
2992        case TX_E_BW_UPDATE:
2993                /* No-op */
2994                break;
2995
2996        default:
2997                bfa_sm_fault(event);
2998        }
2999}
3000
3001static void
3002bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3003{
3004}
3005
3006static void
3007bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3008{
3009        switch (event) {
3010        case TX_E_FAIL:
3011        case TX_E_PRIO_CHANGE:
3012        case TX_E_BW_UPDATE:
3013                /* No-op */
3014                break;
3015
3016        case TX_E_CLEANUP_DONE:
3017                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3018                break;
3019
3020        default:
3021                bfa_sm_fault(event);
3022        }
3023}
3024
3025static void
3026bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3027{
3028        tx->tx_stall_cbfn(tx->bna->bnad, tx);
3029        bna_tx_enet_stop(tx);
3030}
3031
3032static void
3033bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3034{
3035        switch (event) {
3036        case TX_E_STOP:
3037                bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3038                break;
3039
3040        case TX_E_FAIL:
3041                bfa_fsm_set_state(tx, bna_tx_sm_failed);
3042                call_tx_prio_change_cbfn(tx);
3043                tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3044                break;
3045
3046        case TX_E_STOPPED:
3047                bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3048                break;
3049
3050        case TX_E_PRIO_CHANGE:
3051        case TX_E_BW_UPDATE:
3052                /* No-op */
3053                break;
3054
3055        default:
3056                bfa_sm_fault(event);
3057        }
3058}
3059
3060static void
3061bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3062{
3063        call_tx_prio_change_cbfn(tx);
3064        tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3065}
3066
3067static void
3068bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3069{
3070        switch (event) {
3071        case TX_E_STOP:
3072                bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3073                break;
3074
3075        case TX_E_FAIL:
3076                bfa_fsm_set_state(tx, bna_tx_sm_failed);
3077                break;
3078
3079        case TX_E_PRIO_CHANGE:
3080        case TX_E_BW_UPDATE:
3081                /* No-op */
3082                break;
3083
3084        case TX_E_CLEANUP_DONE:
3085                bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3086                break;
3087
3088        default:
3089                bfa_sm_fault(event);
3090        }
3091}
3092
3093static void
3094bna_tx_sm_failed_entry(struct bna_tx *tx)
3095{
3096}
3097
3098static void
3099bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3100{
3101        switch (event) {
3102        case TX_E_START:
3103                bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3104                break;
3105
3106        case TX_E_STOP:
3107                bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3108                break;
3109
3110        case TX_E_FAIL:
3111                /* No-op */
3112                break;
3113
3114        case TX_E_CLEANUP_DONE:
3115                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3116                break;
3117
3118        default:
3119                bfa_sm_fault(event);
3120        }
3121}
3122
3123static void
3124bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3125{
3126}
3127
3128static void
3129bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3130{
3131        switch (event) {
3132        case TX_E_STOP:
3133                bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3134                break;
3135
3136        case TX_E_FAIL:
3137                bfa_fsm_set_state(tx, bna_tx_sm_failed);
3138                break;
3139
3140        case TX_E_CLEANUP_DONE:
3141                bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3142                break;
3143
3144        case TX_E_BW_UPDATE:
3145                /* No-op */
3146                break;
3147
3148        default:
3149                bfa_sm_fault(event);
3150        }
3151}
3152
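/*
 * Summary of the stop/cleanup transitions handled by the state
 * handlers above:
 *
 *   stop_wait:         TX_E_STOPPED / TX_E_FAIL -> cleanup_wait
 *   cleanup_wait:      TX_E_CLEANUP_DONE        -> stopped
 *   prio_stop_wait:    TX_E_STOPPED             -> prio_cleanup_wait
 *   prio_cleanup_wait: TX_E_CLEANUP_DONE        -> start_wait
 *   failed:            TX_E_START               -> quiesce_wait
 *   quiesce_wait:      TX_E_CLEANUP_DONE        -> start_wait
 */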
3153static void
3154bna_bfi_tx_enet_start(struct bna_tx *tx)
3155{
3156        struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3157        struct bna_txq *txq = NULL;
3158        struct list_head *qe;
3159        int i;
3160
3161        bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3162                BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3163        cfg_req->mh.num_entries = htons(
3164                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3165
3166        cfg_req->num_queues = tx->num_txq;
3167        for (i = 0, qe = bfa_q_first(&tx->txq_q);
3168                i < tx->num_txq;
3169                i++, qe = bfa_q_next(qe)) {
3170                txq = (struct bna_txq *)qe;
3171
3172                bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3173                cfg_req->q_cfg[i].q.priority = txq->priority;
3174
3175                cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3176                        txq->ib.ib_seg_host_addr.lsb;
3177                cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3178                        txq->ib.ib_seg_host_addr.msb;
3179                cfg_req->q_cfg[i].ib.intr.msix_index =
3180                        htons((u16)txq->ib.intr_vector);
3181        }
3182
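        /*
         * txq still points at the last TxQ from the loop above; the IB
         * settings below are common to every TxQ of this Tx, since they
         * are derived from the same tx_cfg/intr_info in bna_tx_create().
         */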
3183        cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3184        cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3185        cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3186        cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3187        cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3188                                ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3189        cfg_req->ib_cfg.coalescing_timeout =
3190                        htonl((u32)txq->ib.coalescing_timeo);
3191        cfg_req->ib_cfg.inter_pkt_timeout =
3192                        htonl((u32)txq->ib.interpkt_timeo);
3193        cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3194
3195        cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3196        cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3197        cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3198        cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3199
3200        bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3201                sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3202        bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3203}
3204
3205static void
3206bna_bfi_tx_enet_stop(struct bna_tx *tx)
3207{
3208        struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3209
3210        bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3211                BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3212        req->mh.num_entries = htons(
3213                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3214        bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3215                &req->mh);
3216        bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3217}
3218
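/*
 * Quiesce the per-TxQ interrupt blocks first, then ask firmware to
 * tear down the Tx datapath; completion arrives via
 * bna_bfi_tx_enet_stop_rsp().
 */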
3219static void
3220bna_tx_enet_stop(struct bna_tx *tx)
3221{
3222        struct bna_txq *txq;
3223        struct list_head *qe;
3224
3225        /* Stop IB */
3226        list_for_each(qe, &tx->txq_q) {
3227                txq = (struct bna_txq *)qe;
3228                bna_ib_stop(tx->bna, &txq->ib);
3229        }
3230
3231        bna_bfi_tx_enet_stop(tx);
3232}
3233
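/*
 * Populate the queue page tables for a TxQ: the hardware QPT
 * (qpt.kv_qpt_ptr) is filled with the DMA address of every queue page,
 * while the software QPT (tcb->sw_qpt) mirrors it with the pages'
 * kernel virtual addresses for driver-side access.
 */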
3234static void
3235bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3236                struct bna_mem_descr *qpt_mem,
3237                struct bna_mem_descr *swqpt_mem,
3238                struct bna_mem_descr *page_mem)
3239{
3240        int i;
3241
3242        txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3243        txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3244        txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3245        txq->qpt.page_count = page_count;
3246        txq->qpt.page_size = page_size;
3247
3248        txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3249
3250        for (i = 0; i < page_count; i++) {
3251                txq->tcb->sw_qpt[i] = page_mem[i].kva;
3252
3253                ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3254                        page_mem[i].dma.lsb;
3255                ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3256                        page_mem[i].dma.msb;
3257        }
3258}
3259
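/*
 * Allocate a Tx object from the free list: regular Tx objects come
 * from the head and loopback objects from the tail, so the two types
 * draw rids from opposite ends of the rid-sorted pool.
 */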
3260static struct bna_tx *
3261bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3262{
3263        struct list_head        *qe = NULL;
3264        struct bna_tx *tx = NULL;
3265
3266        if (list_empty(&tx_mod->tx_free_q))
3267                return NULL;
3268        if (type == BNA_TX_T_REGULAR) {
3269                bfa_q_deq(&tx_mod->tx_free_q, &qe);
3270        } else {
3271                bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3272        }
3273        tx = (struct bna_tx *)qe;
3274        bfa_q_qe_init(&tx->qe);
3275        tx->type = type;
3276
3277        return tx;
3278}
3279
3280static void
3281bna_tx_free(struct bna_tx *tx)
3282{
3283        struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3284        struct bna_txq *txq;
3285        struct list_head *prev_qe;
3286        struct list_head *qe;
3287
3288        while (!list_empty(&tx->txq_q)) {
3289                bfa_q_deq(&tx->txq_q, &txq);
3290                bfa_q_qe_init(&txq->qe);
3291                txq->tcb = NULL;
3292                txq->tx = NULL;
3293                list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3294        }
3295
3296        list_for_each(qe, &tx_mod->tx_active_q) {
3297                if (qe == &tx->qe) {
3298                        list_del(&tx->qe);
3299                        bfa_q_qe_init(&tx->qe);
3300                        break;
3301                }
3302        }
3303
3304        tx->bna = NULL;
3305        tx->priv = NULL;
3306
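        /*
         * Put tx back on the free list at the position that keeps the
         * list sorted by rid, preserving the head/tail allocation
         * scheme used by bna_tx_get().
         */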
3307        prev_qe = NULL;
3308        list_for_each(qe, &tx_mod->tx_free_q) {
3309                if (((struct bna_tx *)qe)->rid < tx->rid)
3310                        prev_qe = qe;
3311                else
3312                        break;
3314        }
3315
3316        if (prev_qe == NULL) {
3317                /* This is the first entry */
3318                bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3319        } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3320                /* This is the last entry */
3321                list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3322        } else {
3323                /* Somewhere in the middle */
3324                bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3325                bfa_q_prev(&tx->qe) = prev_qe;
3326                bfa_q_next(prev_qe) = &tx->qe;
3327                bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3328        }
3329}
3330
3331static void
3332bna_tx_start(struct bna_tx *tx)
3333{
3334        tx->flags |= BNA_TX_F_ENET_STARTED;
3335        if (tx->flags & BNA_TX_F_ENABLED)
3336                bfa_fsm_send_event(tx, TX_E_START);
3337}
3338
3339static void
3340bna_tx_stop(struct bna_tx *tx)
3341{
3342        tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3343        tx->stop_cbarg = &tx->bna->tx_mod;
3344
3345        tx->flags &= ~BNA_TX_F_ENET_STARTED;
3346        bfa_fsm_send_event(tx, TX_E_STOP);
3347}
3348
3349static void
3350bna_tx_fail(struct bna_tx *tx)
3351{
3352        tx->flags &= ~BNA_TX_F_ENET_STARTED;
3353        bfa_fsm_send_event(tx, TX_E_FAIL);
3354}
3355
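/*
 * Firmware response to BFI_ENET_H2I_TX_CFG_SET_REQ: record the
 * hardware queue ids, turn the doorbell offsets returned by firmware
 * into kernel virtual addresses within the mapped PCI BAR, and feed
 * TX_E_STARTED into the FSM.
 */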
3356void
3357bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3358{
3359        struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3360        struct bna_txq *txq = NULL;
3361        struct list_head *qe;
3362        int i;
3363
3364        bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3365                sizeof(struct bfi_enet_tx_cfg_rsp));
3366
3367        tx->hw_id = cfg_rsp->hw_id;
3368
3369        for (i = 0, qe = bfa_q_first(&tx->txq_q);
3370                i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3371                txq = (struct bna_txq *)qe;
3372
3373                /* Setup doorbells */
3374                txq->tcb->i_dbell->doorbell_addr =
3375                        tx->bna->pcidev.pci_bar_kva
3376                        + ntohl(cfg_rsp->q_handles[i].i_dbell);
3377                txq->tcb->q_dbell =
3378                        tx->bna->pcidev.pci_bar_kva
3379                        + ntohl(cfg_rsp->q_handles[i].q_dbell);
3380                txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3381
3382                /* Initialize producer/consumer indexes */
3383                (*txq->tcb->hw_consumer_index) = 0;
3384                txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3385        }
3386
3387        bfa_fsm_send_event(tx, TX_E_STARTED);
3388}
3389
3390void
3391bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3392{
3393        bfa_fsm_send_event(tx, TX_E_STOPPED);
3394}
3395
3396void
3397bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3398{
3399        struct bna_tx *tx;
3400        struct list_head *qe;
3401
3402        list_for_each(qe, &tx_mod->tx_active_q) {
3403                tx = (struct bna_tx *)qe;
3404                bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3405        }
3406}
3407
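/*
 * Fill in the memory and interrupt resources needed for num_txq send
 * queues of txq_depth entries each.
 *
 * Worked example (assuming 4 KiB pages and a 64-byte work item, i.e.
 * BFI_TXQ_WI_SIZE == 64): txq_depth = 512 gives q_size = 32 KiB and
 * page_count = 8, so each queue needs a QPT with 8 DMA-address slots,
 * a software QPT with 8 page pointers, and 8 data pages.
 */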
3408void
3409bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3410{
3411        u32 q_size;
3412        u32 page_count;
3413        struct bna_mem_info *mem_info;
3414
3415        res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3416        mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3417        mem_info->mem_type = BNA_MEM_T_KVA;
3418        mem_info->len = sizeof(struct bna_tcb);
3419        mem_info->num = num_txq;
3420
3421        q_size = txq_depth * BFI_TXQ_WI_SIZE;
3422        q_size = ALIGN(q_size, PAGE_SIZE);
3423        page_count = q_size >> PAGE_SHIFT;
3424
3425        res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3426        mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3427        mem_info->mem_type = BNA_MEM_T_DMA;
3428        mem_info->len = page_count * sizeof(struct bna_dma_addr);
3429        mem_info->num = num_txq;
3430
3431        res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3432        mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3433        mem_info->mem_type = BNA_MEM_T_KVA;
3434        mem_info->len = page_count * sizeof(void *);
3435        mem_info->num = num_txq;
3436
3437        res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3438        mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3439        mem_info->mem_type = BNA_MEM_T_DMA;
3440        mem_info->len = PAGE_SIZE;
3441        mem_info->num = num_txq * page_count;
3442
3443        res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3444        mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3445        mem_info->mem_type = BNA_MEM_T_DMA;
3446        mem_info->len = BFI_IBIDX_SIZE;
3447        mem_info->num = num_txq;
3448
3449        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3450        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3451                        BNA_INTR_T_MSIX;
3452        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3453}
3454
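/*
 * Allocate a Tx object and its TxQs and initialize them from tx_cfg
 * and the previously acquired res_info. Returns NULL when the
 * interrupt count is inconsistent or a free pool runs dry; any TxQs
 * already claimed are handed back through bna_tx_free().
 */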
3455struct bna_tx *
3456bna_tx_create(struct bna *bna, struct bnad *bnad,
3457                struct bna_tx_config *tx_cfg,
3458                const struct bna_tx_event_cbfn *tx_cbfn,
3459                struct bna_res_info *res_info, void *priv)
3460{
3461        struct bna_intr_info *intr_info;
3462        struct bna_tx_mod *tx_mod = &bna->tx_mod;
3463        struct bna_tx *tx;
3464        struct bna_txq *txq;
3465        struct list_head *qe;
3466        int page_count;
3467        int page_size;
3468        int page_idx;
3469        int i;
3470
3471        intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3472        page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3473                        tx_cfg->num_txq;
3474        page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3475
3476        /* Get resources */
3479
3480        if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3481                return NULL;
3482
3483        /* Tx */
3484
3485        tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3486        if (!tx)
3487                return NULL;
3488        tx->bna = bna;
3489        tx->priv = priv;
3490
3491        /* TxQs */
3492
3493        INIT_LIST_HEAD(&tx->txq_q);
3494        for (i = 0; i < tx_cfg->num_txq; i++) {
3495                if (list_empty(&tx_mod->txq_free_q))
3496                        goto err_return;
3497
3498                bfa_q_deq(&tx_mod->txq_free_q, &txq);
3499                bfa_q_qe_init(&txq->qe);
3500                list_add_tail(&txq->qe, &tx->txq_q);
3501                txq->tx = tx;
3502        }
3503
3504        /*
3505         * Initialize
3506         */
3507
3508        /* Tx */
3509
3510        tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3511        tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3512        /* Following callbacks are mandatory */
3513        tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3514        tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3515        tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3516
3517        list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3518
3519        tx->num_txq = tx_cfg->num_txq;
3520
3521        tx->flags = 0;
3522        if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3523                switch (tx->type) {
3524                case BNA_TX_T_REGULAR:
3525                        if (!(tx->bna->tx_mod.flags &
3526                                BNA_TX_MOD_F_ENET_LOOPBACK))
3527                                tx->flags |= BNA_TX_F_ENET_STARTED;
3528                        break;
3529                case BNA_TX_T_LOOPBACK:
3530                        if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3531                                tx->flags |= BNA_TX_F_ENET_STARTED;
3532                        break;
3533                }
3534        }
3535
3536        /* TxQ */
3537
3538        i = 0;
3539        page_idx = 0;
3540        list_for_each(qe, &tx->txq_q) {
3541                txq = (struct bna_txq *)qe;
3542                txq->tcb = (struct bna_tcb *)
3543                res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3544                txq->tx_packets = 0;
3545                txq->tx_bytes = 0;
3546
3547                /* IB */
3548                txq->ib.ib_seg_host_addr.lsb =
3549                res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3550                txq->ib.ib_seg_host_addr.msb =
3551                res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3552                txq->ib.ib_seg_host_addr_kva =
3553                res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3554                txq->ib.intr_type = intr_info->intr_type;
3555                txq->ib.intr_vector = (intr_info->num == 1) ?
3556                                        intr_info->idl[0].vector :
3557                                        intr_info->idl[i].vector;
3558                if (intr_info->intr_type == BNA_INTR_T_INTX)
3559                        txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3560                txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3561                txq->ib.interpkt_timeo = 0; /* Not used */
3562                txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3563
3564                /* TCB */
3565
3566                txq->tcb->q_depth = tx_cfg->txq_depth;
3567                txq->tcb->unmap_q = (void *)
3568                res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3569                txq->tcb->hw_consumer_index =
3570                        (u32 *)txq->ib.ib_seg_host_addr_kva;
3571                txq->tcb->i_dbell = &txq->ib.door_bell;
3572                txq->tcb->intr_type = txq->ib.intr_type;
3573                txq->tcb->intr_vector = txq->ib.intr_vector;
3574                txq->tcb->txq = txq;
3575                txq->tcb->bnad = bnad;
3576                txq->tcb->id = i;
3577
3578                /* QPT, SWQPT, Pages */
3579                bna_txq_qpt_setup(txq, page_count, page_size,
3580                        &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3581                        &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3582                        &res_info[BNA_TX_RES_MEM_T_PAGE].
3583                                  res_u.mem_info.mdl[page_idx]);
3584                txq->tcb->page_idx = page_idx;
3585                txq->tcb->page_count = page_count;
3586                page_idx += page_count;
3587
3588                /* Callback to bnad for setting up TCB */
3589                if (tx->tcb_setup_cbfn)
3590                        (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3591
3592                if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3593                        txq->priority = txq->tcb->id;
3594                else
3595                        txq->priority = tx_mod->default_prio;
3596
3597                i++;
3598        }
3599
3600        tx->txf_vlan_id = 0;
3601
3602        bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3603
3604        tx_mod->rid_mask |= (1 << tx->rid);
3605
3606        return tx;
3607
3608err_return:
3609        bna_tx_free(tx);
3610        return NULL;
3611}
3612
3613void
3614bna_tx_destroy(struct bna_tx *tx)
3615{
3616        struct bna_txq *txq;
3617        struct list_head *qe;
3618
3619        list_for_each(qe, &tx->txq_q) {
3620                txq = (struct bna_txq *)qe;
3621                if (tx->tcb_destroy_cbfn)
3622                        (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3623        }
3624
3625        tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3626        bna_tx_free(tx);
3627}
3628
3629void
3630bna_tx_enable(struct bna_tx *tx)
3631{
3632        if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3633                return;
3634
3635        tx->flags |= BNA_TX_F_ENABLED;
3636
3637        if (tx->flags & BNA_TX_F_ENET_STARTED)
3638                bfa_fsm_send_event(tx, TX_E_START);
3639}
3640
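/*
 * Disable a Tx. With BNA_SOFT_CLEANUP the FSM is left untouched and
 * the callback is invoked immediately; with BNA_HARD_CLEANUP the stop
 * runs through the FSM and the callback is deferred until the Tx has
 * actually stopped. A caller might, for instance (sketch only; the
 * callback and completion names are illustrative):
 *
 *        bna_tx_disable(tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
 *        wait_for_completion(&tx_disabled_done);
 */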
3641void
3642bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3643                void (*cbfn)(void *, struct bna_tx *))
3644{
3645        if (type == BNA_SOFT_CLEANUP) {
3646                (*cbfn)(tx->bna->bnad, tx);
3647                return;
3648        }
3649
3650        tx->stop_cbfn = cbfn;
3651        tx->stop_cbarg = tx->bna->bnad;
3652
3653        tx->flags &= ~BNA_TX_F_ENABLED;
3654
3655        bfa_fsm_send_event(tx, TX_E_STOP);
3656}
3657
3658void
3659bna_tx_cleanup_complete(struct bna_tx *tx)
3660{
3661        bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3662}
3663
3664static void
3665bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3666{
3667        struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3668
3669        bfa_wc_down(&tx_mod->tx_stop_wc);
3670}
3671
3672static void
3673bna_tx_mod_cb_tx_stopped_all(void *arg)
3674{
3675        struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3676
3677        if (tx_mod->stop_cbfn)
3678                tx_mod->stop_cbfn(&tx_mod->bna->enet);
3679        tx_mod->stop_cbfn = NULL;
3680}
3681
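/*
 * Carve the Tx and TxQ object arrays out of the memory supplied via
 * res_info and seed the free lists in rid order.
 */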
3682void
3683bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3684                struct bna_res_info *res_info)
3685{
3686        int i;
3687
3688        tx_mod->bna = bna;
3689        tx_mod->flags = 0;
3690
3691        tx_mod->tx = (struct bna_tx *)
3692                res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3693        tx_mod->txq = (struct bna_txq *)
3694                res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3695
3696        INIT_LIST_HEAD(&tx_mod->tx_free_q);
3697        INIT_LIST_HEAD(&tx_mod->tx_active_q);
3698
3699        INIT_LIST_HEAD(&tx_mod->txq_free_q);
3700
3701        for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3702                tx_mod->tx[i].rid = i;
3703                bfa_q_qe_init(&tx_mod->tx[i].qe);
3704                list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3705                bfa_q_qe_init(&tx_mod->txq[i].qe);
3706                list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3707        }
3708
3709        tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3710        tx_mod->default_prio = 0;
3711        tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3712        tx_mod->iscsi_prio = -1;
3713}
3714
3715void
3716bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3717{
3718        /*
3719         * The Tx and TxQ object arrays are carved out of memory owned
3720         * by the caller's res_info, so there is nothing to free here;
3721         * just drop the back-pointer to the bna instance.
3722         */
3729        tx_mod->bna = NULL;
3730}
3731
3732void
3733bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3734{
3735        struct bna_tx *tx;
3736        struct list_head *qe;
3737
3738        tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3739        if (type == BNA_TX_T_LOOPBACK)
3740                tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3741
3742        list_for_each(qe, &tx_mod->tx_active_q) {
3743                tx = (struct bna_tx *)qe;
3744                if (tx->type == type)
3745                        bna_tx_start(tx);
3746        }
3747}
3748
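/*
 * Stop all active Tx objects of the given type. A wait counter
 * (bfa_wc) tracks the outstanding stops: bfa_wc_init() effectively
 * holds an initial reference, each stopping Tx adds one via
 * bfa_wc_up(), and bfa_wc_wait() drops the initial reference, so
 * bna_tx_mod_cb_tx_stopped_all() fires exactly once even when no Tx
 * matched the type.
 */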
3749void
3750bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3751{
3752        struct bna_tx *tx;
3753        struct list_head *qe;
3754
3755        tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3756        tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3757
3758        tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3759
3760        bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3761
3762        list_for_each(qe, &tx_mod->tx_active_q) {
3763                tx = (struct bna_tx *)qe;
3764                if (tx->type == type) {
3765                        bfa_wc_up(&tx_mod->tx_stop_wc);
3766                        bna_tx_stop(tx);
3767                }
3768        }
3769
3770        bfa_wc_wait(&tx_mod->tx_stop_wc);
3771}
3772
3773void
3774bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3775{
3776        struct bna_tx *tx;
3777        struct list_head *qe;
3778
3779        tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3780        tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3781
3782        list_for_each(qe, &tx_mod->tx_active_q) {
3783                tx = (struct bna_tx *)qe;
3784                bna_tx_fail(tx);
3785        }
3786}
3787
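/*
 * Propagate a new interrupt coalescing timeout to the interrupt block
 * of every TxQ belonging to this Tx.
 */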
3788void
3789bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3790{
3791        struct bna_txq *txq;
3792        struct list_head *qe;
3793
3794        list_for_each(qe, &tx->txq_q) {
3795                txq = (struct bna_txq *)qe;
3796                bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3797        }
3798}
3799