dpdk/drivers/net/bnx2x/bnx2x_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_string_fns.h>
#include <rte_dev.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

/*
 * The set of PCI devices this driver supports
 */
#define BROADCOM_PCI_VENDOR_ID 0x14E4
#define QLOGIC_PCI_VENDOR_ID 0x1077
static const struct rte_pci_id pci_id_bnx2x_map[] = {
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
        { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
        { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
        { .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
        { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
        { RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
        { .vendor_id = 0, }
};

struct rte_bnx2x_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint32_t offset_hi;
        uint32_t offset_lo;
};

static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
        {"rx_buffer_drops",
                offsetof(struct bnx2x_eth_stats, brb_drop_hi),
                offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
        {"rx_buffer_truncates",
                offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
                offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
        {"rx_buffer_truncate_discard",
                offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
                offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
        {"mac_filter_discard",
                offsetof(struct bnx2x_eth_stats, mac_filter_discard),
                offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
        {"no_match_vlan_tag_discard",
                offsetof(struct bnx2x_eth_stats, mf_tag_discard),
                offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
        {"tx_pause",
                offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
                offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
        {"rx_pause",
                offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
                offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
        {"tx_priority_flow_control",
                offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
                offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
        {"rx_priority_flow_control",
                offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
                offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};

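/* Build an rte_eth_link from the PHY state in sc->link_vars and publish it
 * atomically through rte_eth_linkstatus_set().
 */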
static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        struct rte_eth_link link;

        PMD_INIT_FUNC_TRACE(sc);

        memset(&link, 0, sizeof(link));
        mb();
        link.link_speed = sc->link_vars.line_speed;
        switch (sc->link_vars.duplex) {
                case DUPLEX_FULL:
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        break;
                case DUPLEX_HALF:
                        link.link_duplex = ETH_LINK_HALF_DUPLEX;
                        break;
        }
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                        ETH_LINK_SPEED_FIXED);
        link.link_status = sc->link_vars.link_up;

        return rte_eth_linkstatus_set(dev, &link);
}

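/* Common slowpath work: service the legacy interrupt, run the periodic
 * callout when invoked from the alarm context (intr_cxt == 0), and refresh
 * the reported link if the shared-memory link status has changed.
 */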
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev, int intr_cxt)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        uint32_t link_status;

        bnx2x_intr_legacy(sc);

        if ((atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO) &&
            !intr_cxt)
                bnx2x_periodic_callout(sc);
        link_status = REG_RD(sc, sc->link_params.shmem_base +
                        offsetof(struct shmem_region,
                                port_mb[sc->link_params.port].link_status));
        if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
                bnx2x_link_update(dev);
}

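/* Interrupt callback registered with the EAL for the PF; performs the
 * slowpath work and then acknowledges the interrupt.
 */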
static void
bnx2x_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");

        bnx2x_interrupt_action(dev, 1);
        rte_intr_ack(&sc->pci_dev->intr_handle);
}

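/* Slowpath poll driven by an EAL alarm: on the PF, bnx2x_periodic_start()
 * re-arms itself every BNX2X_SP_TIMER_PERIOD until bnx2x_periodic_stop()
 * cancels the alarm.
 */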
static void bnx2x_periodic_start(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
        bnx2x_interrupt_action(dev, 0);
        if (IS_PF(sc)) {
                ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
                                        bnx2x_periodic_start, (void *)dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
                                             " timer rc %d", ret);
                }
        }
}

void bnx2x_periodic_stop(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct bnx2x_softc *sc = dev->data->dev_private;

        atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);

        rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);

        PMD_DRV_LOG(DEBUG, sc, "Periodic poll stopped");
}

/*
 * Devops - helper functions that can be called from the user application
 */

static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

        int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

        PMD_INIT_FUNC_TRACE(sc);

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
                dev->data->mtu = sc->mtu;
        }

        if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than the number of RX queues");
                return -EINVAL;
        }

        sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
        if (sc->num_queues > mp_ncpus) {
                PMD_DRV_LOG(ERR, sc, "The number of queues is greater than the number of CPUs");
                return -EINVAL;
        }

        PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
                       sc->num_queues, sc->mtu);

        /* allocate ilt */
        if (bnx2x_alloc_ilt_mem(sc) != 0) {
                PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem failed");
                return -ENXIO;
        }

        bnx2x_dev_rxtx_init_dummy(dev);
        return 0;
}

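/* Bring the port up: restart the periodic poll on the PF, initialize the
 * NIC, hook up the interrupt handler, restore the VF multicast list and
 * switch in the real Rx/Tx burst functions.
 */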
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        PMD_INIT_FUNC_TRACE(sc);

        /* start the periodic callout */
        if (IS_PF(sc)) {
                if (atomic_load_acq_long(&sc->periodic_flags) ==
                    PERIODIC_STOP) {
                        bnx2x_periodic_start(dev);
                        PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
                }
        }

        ret = bnx2x_init(sc);
        if (ret) {
                PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
                return -1;
        }

        if (IS_PF(sc)) {
                rte_intr_callback_register(&sc->pci_dev->intr_handle,
                                bnx2x_interrupt_handler, (void *)dev);

                if (rte_intr_enable(&sc->pci_dev->intr_handle))
                        PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
        }

        /* Configure the previously stored Multicast address list */
        if (IS_VF(sc))
                bnx2x_vfpf_set_mcast(sc, sc->mc_addrs, sc->mc_addrs_num);
        bnx2x_dev_rxtx_init(dev);

        bnx2x_print_device_info(sc);

        return ret;
}

static int
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        PMD_INIT_FUNC_TRACE(sc);

        bnx2x_dev_rxtx_init_dummy(dev);

        if (IS_PF(sc)) {
                rte_intr_disable(&sc->pci_dev->intr_handle);
                rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
                                bnx2x_interrupt_handler, (void *)dev);

                /* stop the periodic callout */
                bnx2x_periodic_stop(dev);
        }
        /* Remove the configured Multicast list:
         * a NULL address list with a count of 0 denotes DEL_CMD
         */
        if (IS_VF(sc))
                bnx2x_vfpf_set_mcast(sc, NULL, 0);
        ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
        if (ret) {
                PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
                return ret;
        }

        return 0;
}

static int
bnx2x_dev_close(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);

        /* only close in case of the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (IS_VF(sc))
                bnx2x_vf_close(sc);

        bnx2x_dev_clear_queues(dev);
        memset(&(dev->data->dev_link), 0, sizeof(struct rte_eth_link));

        /* free ilt */
        bnx2x_free_ilt_mem(sc);

        /* mac_addrs must not be freed separately because it is part of dev_private */
        dev->data->mac_addrs = NULL;

        return 0;
}

static int
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_PROMISC;
        if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);

        return 0;
}

static int
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
        if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
        bnx2x_set_rx_mode(sc);

        return 0;
}

static int
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
        if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);

        return 0;
}

static int
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
        if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
                sc->rx_mode = BNX2X_RX_MODE_PROMISC;
        bnx2x_set_rx_mode(sc);

        return 0;
}

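/* VF only: replace the multicast filter list by flushing the old entries
 * through the PF mailbox and then programming the new addresses.
 */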
static int
bnx2x_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                struct rte_ether_addr *mc_addrs, uint32_t mc_addrs_num)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int err;
        PMD_INIT_FUNC_TRACE(sc);
        /* flush previous addresses */
        err = bnx2x_vfpf_set_mcast(sc, NULL, 0);
        if (err)
                return err;
        sc->mc_addrs_num = 0;

        /* Add new ones */
        err = bnx2x_vfpf_set_mcast(sc, mc_addrs, mc_addrs_num);
        if (err)
                return err;

        sc->mc_addrs_num = mc_addrs_num;
        memcpy(sc->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));

        return 0;
}

static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE(sc);

        return bnx2x_link_update(dev);
}

static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        int ret = 0;

        ret = bnx2x_link_update(dev);

        bnx2x_check_bull(sc);
        if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
                PMD_DRV_LOG(ERR, sc, "PF indicated channel is down. "
                                "VF device is no longer operational");
                dev->data->dev_link.link_status = ETH_LINK_DOWN;
        }

        return ret;
}

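/* Combine the 32-bit hi/lo hardware counters into the 64-bit fields of
 * struct rte_eth_stats; buffer drops and truncations are accounted as
 * imissed.
 */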
static int
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        uint32_t brb_truncate_discard;
        uint64_t brb_drops;
        uint64_t brb_truncates;

        PMD_INIT_FUNC_TRACE(sc);

        bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

        memset(stats, 0, sizeof(struct rte_eth_stats));

        stats->ipackets =
                HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
                                sc->eth_stats.total_unicast_packets_received_lo) +
                HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
                                sc->eth_stats.total_multicast_packets_received_lo) +
                HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
                                sc->eth_stats.total_broadcast_packets_received_lo);

        stats->opackets =
                HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
                                sc->eth_stats.total_unicast_packets_transmitted_lo) +
                HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
                                sc->eth_stats.total_multicast_packets_transmitted_lo) +
                HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
                                sc->eth_stats.total_broadcast_packets_transmitted_lo);

        stats->ibytes =
                HILO_U64(sc->eth_stats.total_bytes_received_hi,
                                sc->eth_stats.total_bytes_received_lo);

        stats->obytes =
                HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
                                sc->eth_stats.total_bytes_transmitted_lo);

        stats->ierrors =
                HILO_U64(sc->eth_stats.error_bytes_received_hi,
                                sc->eth_stats.error_bytes_received_lo);

        stats->oerrors = 0;

        stats->rx_nombuf =
                HILO_U64(sc->eth_stats.no_buff_discard_hi,
                                sc->eth_stats.no_buff_discard_lo);

        brb_drops =
                HILO_U64(sc->eth_stats.brb_drop_hi,
                         sc->eth_stats.brb_drop_lo);

        brb_truncates =
                HILO_U64(sc->eth_stats.brb_truncate_hi,
                         sc->eth_stats.brb_truncate_lo);

        brb_truncate_discard = sc->eth_stats.brb_truncate_discard;

        stats->imissed = brb_drops + brb_truncates +
                         brb_truncate_discard + stats->rx_nombuf;

        return 0;
}

static int
bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
                       struct rte_eth_xstat_name *xstats_names,
                       __rte_unused unsigned limit)
{
        unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        strlcpy(xstats_names[i].name,
                                bnx2x_xstats_strings[i].name,
                                sizeof(xstats_names[i].name));

        return stat_cnt;
}

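/* Extended stats: entries whose hi and lo offsets differ are 64-bit
 * counters split across two 32-bit words and are combined with HILO_U64;
 * otherwise the counter is read directly at offset_lo.
 */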
static int
bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        struct bnx2x_softc *sc = dev->data->dev_private;
        unsigned int num = RTE_DIM(bnx2x_xstats_strings);

        if (n < num)
                return num;

        bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

        for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
                if (bnx2x_xstats_strings[num].offset_hi !=
                    bnx2x_xstats_strings[num].offset_lo)
                        xstats[num].value = HILO_U64(
                                          *(uint32_t *)((char *)&sc->eth_stats +
                                          bnx2x_xstats_strings[num].offset_hi),
                                          *(uint32_t *)((char *)&sc->eth_stats +
                                          bnx2x_xstats_strings[num].offset_lo));
                else
                        xstats[num].value =
                                          *(uint64_t *)((char *)&sc->eth_stats +
                                          bnx2x_xstats_strings[num].offset_lo);
                xstats[num].id = num;
        }

        return num;
}

static int
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        dev_info->max_rx_queues  = sc->max_rx_queues;
        dev_info->max_tx_queues  = sc->max_tx_queues;
        dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
        dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
        dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
        dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
        dev_info->rx_desc_lim.nb_mtu_seg_max = 1;
        dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;

        return 0;
}

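/* MAC address add/remove are forwarded to the chip-specific handlers in
 * sc->mac_ops when they are set.
 */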
static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                uint32_t index, uint32_t pool)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        if (sc->mac_ops.mac_addr_add) {
                sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
                return 0;
        }
        return -ENOTSUP;
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct bnx2x_softc *sc = dev->data->dev_private;

        if (sc->mac_ops.mac_addr_remove)
                sc->mac_ops.mac_addr_remove(dev, index);
}

static const struct eth_dev_ops bnx2x_eth_dev_ops = {
        .dev_configure                = bnx2x_dev_configure,
        .dev_start                    = bnx2x_dev_start,
        .dev_stop                     = bnx2x_dev_stop,
        .dev_close                    = bnx2x_dev_close,
        .promiscuous_enable           = bnx2x_promisc_enable,
        .promiscuous_disable          = bnx2x_promisc_disable,
        .allmulticast_enable          = bnx2x_dev_allmulticast_enable,
        .allmulticast_disable         = bnx2x_dev_allmulticast_disable,
        .link_update                  = bnx2x_dev_link_update,
        .stats_get                    = bnx2x_dev_stats_get,
        .xstats_get                   = bnx2x_dev_xstats_get,
        .xstats_get_names             = bnx2x_get_xstats_names,
        .dev_infos_get                = bnx2x_dev_infos_get,
        .rx_queue_setup               = bnx2x_dev_rx_queue_setup,
        .rx_queue_release             = bnx2x_dev_rx_queue_release,
        .tx_queue_setup               = bnx2x_dev_tx_queue_setup,
        .tx_queue_release             = bnx2x_dev_tx_queue_release,
        .mac_addr_add                 = bnx2x_mac_addr_add,
        .mac_addr_remove              = bnx2x_mac_addr_remove,
};

/*
 * dev_ops for virtual function
 */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
        .dev_configure                = bnx2x_dev_configure,
        .dev_start                    = bnx2x_dev_start,
        .dev_stop                     = bnx2x_dev_stop,
        .dev_close                    = bnx2x_dev_close,
        .promiscuous_enable           = bnx2x_promisc_enable,
        .promiscuous_disable          = bnx2x_promisc_disable,
        .allmulticast_enable          = bnx2x_dev_allmulticast_enable,
        .allmulticast_disable         = bnx2x_dev_allmulticast_disable,
        .set_mc_addr_list             = bnx2x_dev_set_mc_addr_list,
        .link_update                  = bnx2xvf_dev_link_update,
        .stats_get                    = bnx2x_dev_stats_get,
        .xstats_get                   = bnx2x_dev_xstats_get,
        .xstats_get_names             = bnx2x_get_xstats_names,
        .dev_infos_get                = bnx2x_dev_infos_get,
        .rx_queue_setup               = bnx2x_dev_rx_queue_setup,
        .rx_queue_release             = bnx2x_dev_rx_queue_release,
        .tx_queue_setup               = bnx2x_dev_tx_queue_setup,
        .tx_queue_release             = bnx2x_dev_tx_queue_release,
        .mac_addr_add                 = bnx2x_mac_addr_add,
        .mac_addr_remove              = bnx2x_mac_addr_remove,
};

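/* Initialization path shared by the PF and VF drivers: record the PCI
 * identity and BAR addresses, load the firmware, attach the adapter and,
 * on the PF, schedule the periodic slowpath poll. The VF additionally
 * allocates the PF mailbox and bulletin DMA areas and requests its
 * resources from the PF.
 */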
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
        int ret = 0;
        struct rte_pci_device *pci_dev;
        struct rte_pci_addr pci_addr;
        struct bnx2x_softc *sc;
        static bool adapter_info = true;

        /* Extract key data structures */
        sc = eth_dev->data->dev_private;
        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        pci_addr = pci_dev->addr;

        snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);

        PMD_INIT_FUNC_TRACE(sc);

        eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                PMD_DRV_LOG(ERR, sc, "Skipping device init from secondary process");
                return 0;
        }

        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        sc->pcie_bus    = pci_dev->addr.bus;
        sc->pcie_device = pci_dev->addr.devid;

        sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
        sc->devinfo.device_id    = pci_dev->id.device_id;
        sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
        sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

        if (is_vf)
                sc->flags = BNX2X_IS_VF_FLAG;

        sc->pcie_func = pci_dev->addr.function;
        sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
        if (is_vf)
                sc->bar[BAR1].base_addr = (void *)
                        ((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
        else
                sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

        assert(sc->bar[BAR0].base_addr);
        assert(sc->bar[BAR1].base_addr);

        bnx2x_load_firmware(sc);
        assert(sc->firmware);

        if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                sc->udp_rss = 1;

        sc->rx_budget = BNX2X_RX_BUDGET;
        sc->hc_rx_ticks = BNX2X_RX_TICKS;
        sc->hc_tx_ticks = BNX2X_TX_TICKS;

        sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;

        sc->pci_dev = pci_dev;
        ret = bnx2x_attach(sc);
        if (ret) {
                PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
                return ret;
        }

        /* Print important adapter info for the user. */
        if (adapter_info) {
                bnx2x_print_adapter_info(sc);
                adapter_info = false;
        }

        /* schedule periodic poll for slowpath link events */
        if (IS_PF(sc)) {
                PMD_DRV_LOG(DEBUG, sc, "Scheduling periodic poll for slowpath link events");
                ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
                                        bnx2x_periodic_start, (void *)eth_dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
                                             " timer rc %d", ret);
                        return -EINVAL;
                }
        }

        eth_dev->data->mac_addrs =
                (struct rte_ether_addr *)sc->link_params.mac_addr;

        if (IS_VF(sc)) {
                rte_spinlock_init(&sc->vf2pf_lock);

                ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
                                      &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
                                      RTE_CACHE_LINE_SIZE);
                if (ret)
                        goto out;

                sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
                                         sc->vf2pf_mbox_mapping.vaddr;

                ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
                                      &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
                                      RTE_CACHE_LINE_SIZE);
                if (ret)
                        goto out;

                sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
                                             sc->pf2vf_bulletin_mapping.vaddr;

                ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
                                             sc->max_rx_queues);
                if (ret)
                        goto out;
        }

        return 0;

out:
        if (IS_PF(sc))
                bnx2x_periodic_stop(eth_dev);

        return ret;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
        struct bnx2x_softc *sc = eth_dev->data->dev_private;
        PMD_INIT_FUNC_TRACE(sc);
        return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
        struct bnx2x_softc *sc = eth_dev->data->dev_private;
        PMD_INIT_FUNC_TRACE(sc);
        return bnx2x_common_dev_init(eth_dev, 1);
}

static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct bnx2x_softc *sc = eth_dev->data->dev_private;
        PMD_INIT_FUNC_TRACE(sc);
        bnx2x_dev_close(eth_dev);
        return 0;
}

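/* A single probe callback is shared by the PF and VF drivers; it dispatches
 * on which rte_pci_driver matched the device.
 */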
static struct rte_pci_driver rte_bnx2x_pmd;
static struct rte_pci_driver rte_bnx2xvf_pmd;

static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
        struct rte_pci_device *pci_dev)
{
        if (pci_drv == &rte_bnx2x_pmd)
                return rte_eth_dev_pci_generic_probe(pci_dev,
                                sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
        else if (pci_drv == &rte_bnx2xvf_pmd)
                return rte_eth_dev_pci_generic_probe(pci_dev,
                                sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
        else
                return -EINVAL;
}

static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit);
}

static struct rte_pci_driver rte_bnx2x_pmd = {
        .id_table = pci_id_bnx2x_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_bnx2x_pci_probe,
        .remove = eth_bnx2x_pci_remove,
};

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_bnx2xvf_pmd = {
        .id_table = pci_id_bnx2xvf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_bnx2x_pci_probe,
        .remove = eth_bnx2x_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(bnx2x_logtype_driver, driver, NOTICE);