dpdk/drivers/net/nfp/nfp_common.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright (c) 2014-2018 Netronome Systems, Inc.
   3 * All rights reserved.
   4 *
   5 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
   6 */
   7
   8/*
   9 * vim:shiftwidth=8:noexpandtab
  10 *
  11 * @file dpdk/pmd/nfp_common.c
  12 *
  13 * Netronome vNIC DPDK Poll-Mode Driver: Common files
  14 */
  15
  16#include <rte_byteorder.h>
  17#include <rte_common.h>
  18#include <rte_log.h>
  19#include <rte_debug.h>
  20#include <ethdev_driver.h>
  21#include <ethdev_pci.h>
  22#include <rte_dev.h>
  23#include <rte_ether.h>
  24#include <rte_malloc.h>
  25#include <rte_memzone.h>
  26#include <rte_mempool.h>
  27#include <rte_version.h>
  28#include <rte_string_fns.h>
  29#include <rte_alarm.h>
  30#include <rte_spinlock.h>
  31#include <rte_service_component.h>
  32
  33#include "nfpcore/nfp_cpp.h"
  34#include "nfpcore/nfp_nffw.h"
  35#include "nfpcore/nfp_hwinfo.h"
  36#include "nfpcore/nfp_mip.h"
  37#include "nfpcore/nfp_rtsym.h"
  38#include "nfpcore/nfp_nsp.h"
  39
  40#include "nfp_common.h"
  41#include "nfp_rxtx.h"
  42#include "nfp_logs.h"
  43#include "nfp_ctrl.h"
  44#include "nfp_cpp_bridge.h"
  45
  46#include <sys/types.h>
  47#include <sys/socket.h>
  48#include <sys/un.h>
  49#include <unistd.h>
  50#include <stdio.h>
  51#include <sys/ioctl.h>
  52#include <errno.h>
  53
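/*
 * Kick the configuration queue and poll the update word until the firmware
 * acknowledges the change by clearing it, or an error/timeout occurs.
 * Called by nfp_net_reconfig() with hw->reconfig_lock held.
 */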
  54static int
  55__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
  56{
  57        int cnt;
  58        uint32_t new;
  59        struct timespec wait;
  60
  61        PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
  62                    hw->qcp_cfg);
  63
  64        if (hw->qcp_cfg == NULL)
  65                rte_panic("Bad configuration queue pointer\n");
  66
  67        nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);
  68
  69        wait.tv_sec = 0;
  70        wait.tv_nsec = 1000000;
  71
  72        PMD_DRV_LOG(DEBUG, "Polling for update ack...");
  73
  74        /* Poll update field, waiting for NFP to ack the config */
  75        for (cnt = 0; ; cnt++) {
  76                new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
  77                if (new == 0)
  78                        break;
  79                if (new & NFP_NET_CFG_UPDATE_ERR) {
  80                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
  81                        return -1;
  82                }
  83                if (cnt >= NFP_NET_POLL_TIMEOUT) {
  84                        PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
  85                                          " %dms", update, cnt);
  86                        rte_panic("Exiting\n");
  87                }
  88                nanosleep(&wait, 0); /* wait for 1ms */
  89        }
  90        PMD_DRV_LOG(DEBUG, "Ack DONE");
  91        return 0;
  92}
  93
  94/*
  95 * Reconfigure the NIC
  96 * @hw:      device to reconfigure
  97 * @ctrl:    The value for the ctrl field in the BAR config
  98 * @update:  The value for the update field in the BAR config
  99 *
 100 * Write the update word to the BAR and ping the reconfig queue. Then poll
 101 * until the firmware has acknowledged the update by zeroing the update word.
 102 */
 103int
 104nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 105{
 106        uint32_t err;
 107
 108        PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
 109                    ctrl, update);
 110
 111        rte_spinlock_lock(&hw->reconfig_lock);
 112
 113        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
 114        nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 115
 116        rte_wmb();
 117
 118        err = __nfp_net_reconfig(hw, update);
 119
 120        rte_spinlock_unlock(&hw->reconfig_lock);
 121
 122        if (!err)
 123                return 0;
 124
 125        /*
 126         * Reconfig errors returned here can be handled by the caller;
 127         * fatal conditions trigger rte_panic inside __nfp_net_reconfig.
 128         */
 129        PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
 130                     ctrl, update);
 131        return -EIO;
 132}
 133
 134/*
 135 * Configure an Ethernet device. This function must be invoked first
 136 * before any other function in the Ethernet API. This function can
 137 * also be re-invoked when a device is in the stopped state.
 138 */
 139int
 140nfp_net_configure(struct rte_eth_dev *dev)
 141{
 142        struct rte_eth_conf *dev_conf;
 143        struct rte_eth_rxmode *rxmode;
 144        struct rte_eth_txmode *txmode;
 145        struct nfp_net_hw *hw;
 146
 147        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 148
 149        /*
 150         * A DPDK app tells the driver how many queues to use and how
 151         * those queues should be configured. The DPDK core makes sure
 152         * no more queues than the ones advertised by the driver are
 153         * requested. This function is called after that internal
 154         * process.
 155         */
 156
 157        PMD_INIT_LOG(DEBUG, "Configure");
 158
 159        dev_conf = &dev->data->dev_conf;
 160        rxmode = &dev_conf->rxmode;
 161        txmode = &dev_conf->txmode;
 162
 163        if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 164                rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 165
 166        /* Checking TX mode */
 167        if (txmode->mq_mode) {
 168                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
 169                return -EINVAL;
 170        }
 171
 172        /* Checking RX mode */
 173        if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS &&
 174            !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
 175                PMD_INIT_LOG(INFO, "RSS not supported");
 176                return -EINVAL;
 177        }
 178
 179        return 0;
 180}
 181
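/*
 * Build the bitmasks of the configured TX and RX rings and write them to the
 * device's ring-enable registers.
 */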
 182void
 183nfp_net_enable_queues(struct rte_eth_dev *dev)
 184{
 185        struct nfp_net_hw *hw;
 186        uint64_t enabled_queues = 0;
 187        int i;
 188
 189        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 190
 191        /* Enabling the required TX queues in the device */
 192        for (i = 0; i < dev->data->nb_tx_queues; i++)
 193                enabled_queues |= (1 << i);
 194
 195        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
 196
 197        enabled_queues = 0;
 198
 199        /* Enabling the required RX queues in the device */
 200        for (i = 0; i < dev->data->nb_rx_queues; i++)
 201                enabled_queues |= (1 << i);
 202
 203        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
 204}
 205
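/*
 * Clear both ring-enable masks and drop the device-enable bit from the
 * control word, then push the change with a GEN/RING/MSIX reconfig.
 * hw->ctrl is only updated when the reconfig succeeds.
 */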
 206void
 207nfp_net_disable_queues(struct rte_eth_dev *dev)
 208{
 209        struct nfp_net_hw *hw;
 210        uint32_t new_ctrl, update = 0;
 211
 212        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 213
 214        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
 215        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
 216
 217        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
 218        update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
 219                 NFP_NET_CFG_UPDATE_MSIX;
 220
 221        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
 222                new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
 223
 224        /* If the reconfig fails, leave the hw state unchanged */
 225        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
 226                return;
 227
 228        hw->ctrl = new_ctrl;
 229}
 230
 231void
 232nfp_net_params_setup(struct nfp_net_hw *hw)
 233{
 234        nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
 235        nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
 236}
 237
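/* The reconfig queue (qcp_cfg) sits one queue address space past the start of the TX BAR */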
 238void
 239nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
 240{
 241        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 242}
 243
 244#define ETH_ADDR_LEN    6
 245
 246void
 247nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
 248{
 249        int i;
 250
 251        for (i = 0; i < ETH_ADDR_LEN; i++)
 252                dst[i] = src[i];
 253}
 254
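/*
 * Write the 6-byte MAC address to the port's config BAR as a big-endian
 * 32-bit word followed by a big-endian 16-bit word.
 */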
 255void
 256nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
 257{
 258        uint32_t mac0 = *(uint32_t *)mac;
 259        uint16_t mac1;
 260
 261        nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);
 262
 263        mac += 4;
 264        mac1 = *(uint16_t *)mac;
 265        nn_writew(rte_cpu_to_be_16(mac1),
 266                  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
 267}
 268
 269int
 270nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 271{
 272        struct nfp_net_hw *hw;
 273        uint32_t update, ctrl;
 274
 275        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 276        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
 277            !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
 278                PMD_INIT_LOG(INFO, "MAC address unable to change when"
 279                                  " port enabled");
 280                return -EBUSY;
 281        }
 282
 287        /* Writing new MAC to the specific port BAR address */
 288        nfp_net_write_mac(hw, (uint8_t *)mac_addr);
 289
 290        /* Signal the NIC about the change */
 291        update = NFP_NET_CFG_UPDATE_MACADDR;
 292        ctrl = hw->ctrl;
 293        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
 294            (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
 295                ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
 296        if (nfp_net_reconfig(hw, ctrl, update) < 0) {
 297                PMD_INIT_LOG(INFO, "MAC address update failed");
 298                return -EIO;
 299        }
 300        return 0;
 301}
 302
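/*
 * Map RX queues to interrupt vectors. UIO supports a single queue and no
 * LSC, so queue 0 uses vector 0; with VFIO, queue i is mapped to vector
 * i + 1, keeping vector 0 for non-queue (e.g. LSC) interrupts.
 */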
 303int
 304nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 305                           struct rte_intr_handle *intr_handle)
 306{
 307        struct nfp_net_hw *hw;
 308        int i;
 309
 310        if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
 311                                    dev->data->nb_rx_queues)) {
 312                PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
 313                             " intr_vec", dev->data->nb_rx_queues);
 314                return -ENOMEM;
 315        }
 316
 317        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 318
 319        if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 320                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
 321                /* UIO just supports one queue and no LSC */
 322                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
 323                if (rte_intr_vec_list_index_set(intr_handle, 0, 0))
 324                        return -1;
 325        } else {
 326                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
 327                for (i = 0; i < dev->data->nb_rx_queues; i++) {
 328                        /*
 329                         * The first MSI-X vector is reserved for
 330                         * non-efd interrupts.
 331                         */
 332                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
 333                        if (rte_intr_vec_list_index_set(intr_handle, i,
 334                                                               i + 1))
 335                                return -1;
 336                        PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
 337                                rte_intr_vec_list_index_get(intr_handle,
 338                                                                   i));
 339                }
 340        }
 341
 342        /* Avoiding TX interrupts */
 343        hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
 344        return 0;
 345}
 346
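/*
 * Translate the RX/TX offloads requested in dev_conf into the matching
 * NFP_NET_CFG_CTRL bits; most bits are only set when the corresponding
 * capability is advertised in hw->cap.
 */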
 347uint32_t
 348nfp_check_offloads(struct rte_eth_dev *dev)
 349{
 350        struct nfp_net_hw *hw;
 351        struct rte_eth_conf *dev_conf;
 352        struct rte_eth_rxmode *rxmode;
 353        struct rte_eth_txmode *txmode;
 354        uint32_t ctrl = 0;
 355
 356        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 357
 358        dev_conf = &dev->data->dev_conf;
 359        rxmode = &dev_conf->rxmode;
 360        txmode = &dev_conf->txmode;
 361
 362        if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) {
 363                if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 364                        ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 365        }
 366
 367        if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
 368                if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 369                        ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
 370        }
 371
 372        hw->mtu = dev->data->mtu;
 373
 374        if (txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
 375                ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 376
 377        /* L2 broadcast */
 378        if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
 379                ctrl |= NFP_NET_CFG_CTRL_L2BC;
 380
 381        /* L2 multicast */
 382        if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
 383                ctrl |= NFP_NET_CFG_CTRL_L2MC;
 384
 385        /* TX checksum offload */
 386        if (txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
 387            txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM ||
 388            txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 389                ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 390
 391        /* LSO offload */
 392        if (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
 393                if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 394                        ctrl |= NFP_NET_CFG_CTRL_LSO;
 395                else
 396                        ctrl |= NFP_NET_CFG_CTRL_LSO2;
 397        }
 398
 399        /* TX gather: scatter-gather DMA for multi-segment mbufs */
 400        if (txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 401                ctrl |= NFP_NET_CFG_CTRL_GATHER;
 402
 403        return ctrl;
 404}
 405
 406int
 407nfp_net_promisc_enable(struct rte_eth_dev *dev)
 408{
 409        uint32_t new_ctrl, update = 0;
 410        struct nfp_net_hw *hw;
 411        int ret;
 412
 413        PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
 414
 415        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 416
 417        if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
 418                PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
 419                return -ENOTSUP;
 420        }
 421
 422        if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
 423                PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
 424                return 0;
 425        }
 426
 427        new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 428        update = NFP_NET_CFG_UPDATE_GEN;
 429
 430        /*
 431         * The DPDK core turns promiscuous mode on just after this call,
 432         * assuming it cannot fail ...
 433         */
 434        ret = nfp_net_reconfig(hw, new_ctrl, update);
 435        if (ret < 0)
 436                return ret;
 437
 438        hw->ctrl = new_ctrl;
 439
 440        return 0;
 441}
 442
 443int
 444nfp_net_promisc_disable(struct rte_eth_dev *dev)
 445{
 446        uint32_t new_ctrl, update = 0;
 447        struct nfp_net_hw *hw;
 448        int ret;
 449
 450        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 451
 452        if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
 453                PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
 454                return 0;
 455        }
 456
 457        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
 458        update = NFP_NET_CFG_UPDATE_GEN;
 459
 460        /*
 461         * The DPDK core turns promiscuous mode off just before this
 462         * call, assuming it cannot fail ...
 463         */
 464        ret = nfp_net_reconfig(hw, new_ctrl, update);
 465        if (ret < 0)
 466                return ret;
 467
 468        hw->ctrl = new_ctrl;
 469
 470        return 0;
 471}
 472
 473/*
 474 * Returns 0 when the link status changed, -1 when it did not change.
 475 *
 476 * Waiting for completion is needed as it can take up to 9 seconds to get
 477 * the link status.
 478 */
 479int
 480nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 481{
 482        struct nfp_net_hw *hw;
 483        struct rte_eth_link link;
 484        uint32_t nn_link_status;
 485        int ret;
 486
 487        static const uint32_t ls_to_ethtool[] = {
 488                [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
 489                [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
 490                [NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
 491                [NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
 492                [NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
 493                [NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
 494                [NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
 495                [NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
 496        };
 497
 498        PMD_DRV_LOG(DEBUG, "Link update");
 499
 500        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 501
 502        nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
 503
 504        memset(&link, 0, sizeof(struct rte_eth_link));
 505
 506        if (nn_link_status & NFP_NET_CFG_STS_LINK)
 507                link.link_status = RTE_ETH_LINK_UP;
 508
 509        link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 510
 511        nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 512                         NFP_NET_CFG_STS_LINK_RATE_MASK;
 513
 514        if (nn_link_status >= RTE_DIM(ls_to_ethtool))
 515                link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 516        else
 517                link.link_speed = ls_to_ethtool[nn_link_status];
 518
 519        ret = rte_eth_linkstatus_set(dev, &link);
 520        if (ret == 0) {
 521                if (link.link_status)
 522                        PMD_DRV_LOG(INFO, "NIC Link is Up");
 523                else
 524                        PMD_DRV_LOG(INFO, "NIC Link is Down");
 525        }
 526        return ret;
 527}
 528
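/*
 * The hardware counters in the config BAR are free-running, so every value
 * is reported relative to the baseline stored in hw->eth_stats_base
 * (updated by nfp_net_stats_reset()).
 */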
 529int
 530nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 531{
 532        int i;
 533        struct nfp_net_hw *hw;
 534        struct rte_eth_stats nfp_dev_stats;
 535
 536        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 537
 538        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 539
 540        memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
 541
 542        /* reading per RX ring stats */
 543        for (i = 0; i < dev->data->nb_rx_queues; i++) {
 544                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
 545                        break;
 546
 547                nfp_dev_stats.q_ipackets[i] =
 548                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
 549
 550                nfp_dev_stats.q_ipackets[i] -=
 551                        hw->eth_stats_base.q_ipackets[i];
 552
 553                nfp_dev_stats.q_ibytes[i] =
 554                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 555
 556                nfp_dev_stats.q_ibytes[i] -=
 557                        hw->eth_stats_base.q_ibytes[i];
 558        }
 559
 560        /* reading per TX ring stats */
 561        for (i = 0; i < dev->data->nb_tx_queues; i++) {
 562                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
 563                        break;
 564
 565                nfp_dev_stats.q_opackets[i] =
 566                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
 567
 568                nfp_dev_stats.q_opackets[i] -=
 569                        hw->eth_stats_base.q_opackets[i];
 570
 571                nfp_dev_stats.q_obytes[i] =
 572                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 573
 574                nfp_dev_stats.q_obytes[i] -=
 575                        hw->eth_stats_base.q_obytes[i];
 576        }
 577
 578        nfp_dev_stats.ipackets =
 579                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
 580
 581        nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
 582
 583        nfp_dev_stats.ibytes =
 584                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
 585
 586        nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
 587
 588        nfp_dev_stats.opackets =
 589                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
 590
 591        nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
 592
 593        nfp_dev_stats.obytes =
 594                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 595
 596        nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 597
 598        /* reading general device stats */
 599        nfp_dev_stats.ierrors =
 600                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
 601
 602        nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
 603
 604        nfp_dev_stats.oerrors =
 605                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 606
 607        nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 608
 609        /* RX ring mbuf allocation failures */
 610        nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 611
 612        nfp_dev_stats.imissed =
 613                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 614
 615        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 616
 617        if (stats) {
 618                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
 619                return 0;
 620        }
 621        return -EINVAL;
 622}
 623
 624int
 625nfp_net_stats_reset(struct rte_eth_dev *dev)
 626{
 627        int i;
 628        struct nfp_net_hw *hw;
 629
 630        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 631
 632        /*
 633         * hw->eth_stats_base records the per-counter starting point.
 634         * Let's update it now.
 635         */
 636
 637        /* reading per RX ring stats */
 638        for (i = 0; i < dev->data->nb_rx_queues; i++) {
 639                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
 640                        break;
 641
 642                hw->eth_stats_base.q_ipackets[i] =
 643                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
 644
 645                hw->eth_stats_base.q_ibytes[i] =
 646                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 647        }
 648
 649        /* reading per TX ring stats */
 650        for (i = 0; i < dev->data->nb_tx_queues; i++) {
 651                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
 652                        break;
 653
 654                hw->eth_stats_base.q_opackets[i] =
 655                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
 656
 657                hw->eth_stats_base.q_obytes[i] =
 658                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 659        }
 660
 661        hw->eth_stats_base.ipackets =
 662                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
 663
 664        hw->eth_stats_base.ibytes =
 665                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
 666
 667        hw->eth_stats_base.opackets =
 668                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
 669
 670        hw->eth_stats_base.obytes =
 671                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 672
 673        /* reading general device stats */
 674        hw->eth_stats_base.ierrors =
 675                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
 676
 677        hw->eth_stats_base.oerrors =
 678                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 679
 680        /* RX ring mbuf allocation failures */
 681        dev->data->rx_mbuf_alloc_failed = 0;
 682
 683        hw->eth_stats_base.imissed =
 684                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 685
 686        return 0;
 687}
 688
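/*
 * Report device capabilities to the ethdev layer: queue counts, offload
 * flags derived from hw->cap, default ring thresholds, descriptor limits
 * and RSS parameters.
 */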
 689int
 690nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 691{
 692        struct nfp_net_hw *hw;
 693
 694        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 695
 696        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 697        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
 698        dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
 699        dev_info->max_rx_pktlen = hw->max_mtu;
 700        /* Next should change when PF support is implemented */
 701        dev_info->max_mac_addrs = 1;
 702
 703        if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
 704                dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 705
 706        if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
 707                dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 708                                             RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
 709                                             RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 710
 711        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
 712                dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 713
 714        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
 715                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 716                                             RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
 717                                             RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 718
 719        if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
 720                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 721
 722        if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
 723                dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 724
 725        dev_info->default_rxconf = (struct rte_eth_rxconf) {
 726                .rx_thresh = {
 727                        .pthresh = DEFAULT_RX_PTHRESH,
 728                        .hthresh = DEFAULT_RX_HTHRESH,
 729                        .wthresh = DEFAULT_RX_WTHRESH,
 730                },
 731                .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
 732                .rx_drop_en = 0,
 733        };
 734
 735        dev_info->default_txconf = (struct rte_eth_txconf) {
 736                .tx_thresh = {
 737                        .pthresh = DEFAULT_TX_PTHRESH,
 738                        .hthresh = DEFAULT_TX_HTHRESH,
 739                        .wthresh = DEFAULT_TX_WTHRESH,
 740                },
 741                .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
 742                .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
 743        };
 744
 745        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 746                .nb_max = NFP_NET_MAX_RX_DESC,
 747                .nb_min = NFP_NET_MIN_RX_DESC,
 748                .nb_align = NFP_ALIGN_RING_DESC,
 749        };
 750
 751        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
 752                .nb_max = NFP_NET_MAX_TX_DESC,
 753                .nb_min = NFP_NET_MIN_TX_DESC,
 754                .nb_align = NFP_ALIGN_RING_DESC,
 755                .nb_seg_max = NFP_TX_MAX_SEG,
 756                .nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
 757        };
 758
 759        if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
 760                dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 761
 762                dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
 763                                                   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 764                                                   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
 765                                                   RTE_ETH_RSS_IPV6 |
 766                                                   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
 767                                                   RTE_ETH_RSS_NONFRAG_IPV6_UDP;
 768
 769                dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 770                dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 771        }
 772
 773        dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
 774                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
 775                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
 776
 777        return 0;
 778}
 779
 780const uint32_t *
 781nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
 782{
 783        static const uint32_t ptypes[] = {
 784                /* refers to nfp_net_set_hash() */
 785                RTE_PTYPE_INNER_L3_IPV4,
 786                RTE_PTYPE_INNER_L3_IPV6,
 787                RTE_PTYPE_INNER_L3_IPV6_EXT,
 788                RTE_PTYPE_INNER_L4_MASK,
 789                RTE_PTYPE_UNKNOWN
 790        };
 791
 792        if (dev->rx_pkt_burst == nfp_net_recv_pkts)
 793                return ptypes;
 794        return NULL;
 795}
 796
 797int
 798nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 799{
 800        struct rte_pci_device *pci_dev;
 801        struct nfp_net_hw *hw;
 802        int base = 0;
 803
 804        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 805        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 806
 807        if (rte_intr_type_get(pci_dev->intr_handle) !=
 808                                                        RTE_INTR_HANDLE_UIO)
 809                base = 1;
 810
 811        /* Make sure all updates are written before un-masking */
 812        rte_wmb();
 813        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
 814                      NFP_NET_CFG_ICR_UNMASKED);
 815        return 0;
 816}
 817
 818int
 819nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 820{
 821        struct rte_pci_device *pci_dev;
 822        struct nfp_net_hw *hw;
 823        int base = 0;
 824
 825        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 826        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 827
 828        if (rte_intr_type_get(pci_dev->intr_handle) !=
 829                                                        RTE_INTR_HANDLE_UIO)
 830                base = 1;
 831
 832        /* Make sure all updates are written before un-masking */
 833        rte_wmb();
 834        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
 835        return 0;
 836}
 837
 838static void
 839nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 840{
 841        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 842        struct rte_eth_link link;
 843
 844        rte_eth_linkstatus_get(dev, &link);
 845        if (link.link_status)
 846                PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
 847                            dev->data->port_id, link.link_speed,
 848                            link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
 849                            ? "full-duplex" : "half-duplex");
 850        else
 851                PMD_DRV_LOG(INFO, " Port %d: Link Down",
 852                            dev->data->port_id);
 853
 854        PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
 855                    pci_dev->addr.domain, pci_dev->addr.bus,
 856                    pci_dev->addr.devid, pci_dev->addr.function);
 857}
 858
 859/* Interrupt configuration and handling */
 860
 861/*
 862 * nfp_net_irq_unmask - Unmask an interrupt
 863 *
 864 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 865 * clear the ICR for the entry.
 866 */
 867static void
 868nfp_net_irq_unmask(struct rte_eth_dev *dev)
 869{
 870        struct nfp_net_hw *hw;
 871        struct rte_pci_device *pci_dev;
 872
 873        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 874        pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 875
 876        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
 877                /* If MSI-X auto-masking is used, clear the entry */
 878                rte_wmb();
 879                rte_intr_ack(pci_dev->intr_handle);
 880        } else {
 881                /* Make sure all updates are written before un-masking */
 882                rte_wmb();
 883                nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
 884                              NFP_NET_CFG_ICR_UNMASKED);
 885        }
 886}
 887
 888/*
 889 * Interrupt handler registered as an alarm callback for delayed handling
 890 * of a specific interrupt, waiting for the NIC state to become stable. As
 891 * the NFP interrupt state is not stable right after the link goes down, it
 892 * needs to wait 4 seconds to get a stable status.
 893 *
 895 * @param param    The address of parameter (struct rte_eth_dev *)
 896 *
 897 * @return  void
 898 */
 899void
 900nfp_net_dev_interrupt_delayed_handler(void *param)
 901{
 902        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 903
 904        nfp_net_link_update(dev, 0);
 905        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 906
 907        nfp_net_dev_link_status_print(dev);
 908
 909        /* Unmasking */
 910        nfp_net_irq_unmask(dev);
 911}
 912
 913void
 914nfp_net_dev_interrupt_handler(void *param)
 915{
 916        int64_t timeout;
 917        struct rte_eth_link link;
 918        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 919
 920        PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
 921
 922        rte_eth_linkstatus_get(dev, &link);
 923
 924        nfp_net_link_update(dev, 0);
 925
 926        /* link is likely coming up */
 927        if (!link.link_status) {
 928                /* handle it 1 sec later, waiting for it to be stable */
 929                timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
 930                /* link is likely going down */
 931        } else {
 932                /* handle it 4 sec later, waiting for it to be stable */
 933                timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
 934        }
 935
 936        if (rte_eal_alarm_set(timeout * 1000,
 937                              nfp_net_dev_interrupt_delayed_handler,
 938                              (void *)dev) < 0) {
 939                PMD_INIT_LOG(ERR, "Error setting alarm");
 940                /* Unmasking */
 941                nfp_net_irq_unmask(dev);
 942        }
 943}
 944
 945int
 946nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 947{
 948        struct nfp_net_hw *hw;
 949
 950        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 951
 952        /* mtu setting is forbidden if port is started */
 953        if (dev->data->dev_started) {
 954                PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
 955                            dev->data->port_id);
 956                return -EBUSY;
 957        }
 958
 959        /* writing to configuration space */
 960        nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
 961
 962        hw->mtu = mtu;
 963
 964        return 0;
 965}
 966
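/*
 * Toggle VLAN stripping according to the requested mask and apply it with a
 * GEN reconfig; hw->ctrl is only updated when the reconfig succeeds.
 */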
 967int
 968nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 969{
 970        uint32_t new_ctrl, update;
 971        struct nfp_net_hw *hw;
 972        int ret;
 973
 974        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 975        new_ctrl = 0;
 976
 977        /* Enable vlan strip if it is not configured yet */
 978        if ((mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 979            !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 980                new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
 981
 982        /* Disable vlan strip only if it is currently configured */
 983        if (!(mask & RTE_ETH_VLAN_STRIP_OFFLOAD) &&
 984            (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
 985                new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
 986
 987        if (new_ctrl == 0)
 988                return 0;
 989
 990        update = NFP_NET_CFG_UPDATE_GEN;
 991
 992        ret = nfp_net_reconfig(hw, new_ctrl, update);
 993        if (!ret)
 994                hw->ctrl = new_ctrl;
 995
 996        return ret;
 997}
 998
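/*
 * The redirection table holds 128 one-byte queue indexes packed four per
 * 32-bit config word. reta_conf->mask selects which of the four entries in
 * each word are updated; when only some are selected the word is read back
 * first so the untouched entries are preserved.
 */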
 999static int
1000nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1001                    struct rte_eth_rss_reta_entry64 *reta_conf,
1002                    uint16_t reta_size)
1003{
1004        uint32_t reta, mask;
1005        int i, j;
1006        int idx, shift;
1007        struct nfp_net_hw *hw =
1008                NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1009
1010        if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1011                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1012                        "(%d) doesn't match the number supported by hardware "
1013                        "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1014                return -EINVAL;
1015        }
1016
1017        /*
1018         * Update the Redirection Table. There are 128 8-bit entries which can
1019         * be managed as 32 32-bit entries.
1020         */
1021        for (i = 0; i < reta_size; i += 4) {
1022                /* Handling 4 RSS entries per loop */
1023                idx = i / RTE_ETH_RETA_GROUP_SIZE;
1024                shift = i % RTE_ETH_RETA_GROUP_SIZE;
1025                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1026
1027                if (!mask)
1028                        continue;
1029
1030                reta = 0;
1031                /* If all 4 entries were set, there is no need to read the RETA register */
1032                if (mask != 0xF)
1033                        reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1034
1035                for (j = 0; j < 4; j++) {
1036                        if (!(mask & (0x1 << j)))
1037                                continue;
1038                        if (mask != 0xF)
1039                                /* Clearing the entry bits */
1040                                reta &= ~(0xFF << (8 * j));
1041                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1042                }
1043                nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
1044                              reta);
1045        }
1046        return 0;
1047}
1048
1049/* Update Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
1050int
1051nfp_net_reta_update(struct rte_eth_dev *dev,
1052                    struct rte_eth_rss_reta_entry64 *reta_conf,
1053                    uint16_t reta_size)
1054{
1055        struct nfp_net_hw *hw =
1056                NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1057        uint32_t update;
1058        int ret;
1059
1060        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1061                return -EINVAL;
1062
1063        ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1064        if (ret != 0)
1065                return ret;
1066
1067        update = NFP_NET_CFG_UPDATE_RSS;
1068
1069        if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1070                return -EIO;
1071
1072        return 0;
1073}
1074
1075/* Query Redirection Table (RETA) of Receive Side Scaling of Ethernet device */
1076int
1077nfp_net_reta_query(struct rte_eth_dev *dev,
1078                   struct rte_eth_rss_reta_entry64 *reta_conf,
1079                   uint16_t reta_size)
1080{
1081        uint8_t i, j, mask;
1082        int idx, shift;
1083        uint32_t reta;
1084        struct nfp_net_hw *hw;
1085
1086        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1087
1088        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1089                return -EINVAL;
1090
1091        if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1092                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1093                        "(%d) doesn't match the number supported by hardware "
1094                        "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1095                return -EINVAL;
1096        }
1097
1098        /*
1099         * Read the Redirection Table. There are 128 8-bit entries which can
1100         * be managed as 32 32-bit entries.
1101         */
1102        for (i = 0; i < reta_size; i += 4) {
1103                /* Handling 4 RSS entries per loop */
1104                idx = i / RTE_ETH_RETA_GROUP_SIZE;
1105                shift = i % RTE_ETH_RETA_GROUP_SIZE;
1106                mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1107
1108                if (!mask)
1109                        continue;
1110
1111                reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
1112                                    shift);
1113                for (j = 0; j < 4; j++) {
1114                        if (!(mask & (0x1 << j)))
1115                                continue;
1116                        reta_conf[idx].reta[shift + j] =
1117                                (uint8_t)((reta >> (8 * j)) & 0xFF);
1118                }
1119        }
1120        return 0;
1121}
1122
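/*
 * Program the RSS hash key byte by byte, build the RSS control word from
 * rss_conf->rss_hf (Toeplitz hashing) and write the key length. The caller
 * is responsible for triggering the RSS reconfig.
 */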
1123static int
1124nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1125                        struct rte_eth_rss_conf *rss_conf)
1126{
1127        struct nfp_net_hw *hw;
1128        uint64_t rss_hf;
1129        uint32_t cfg_rss_ctrl = 0;
1130        uint8_t key;
1131        int i;
1132
1133        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1134
1135        /* Writing the key byte by byte */
1136        for (i = 0; i < rss_conf->rss_key_len; i++) {
1137                memcpy(&key, &rss_conf->rss_key[i], 1);
1138                nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1139        }
1140
1141        rss_hf = rss_conf->rss_hf;
1142
1143        if (rss_hf & RTE_ETH_RSS_IPV4)
1144                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1145
1146        if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1147                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1148
1149        if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1150                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1151
1152        if (rss_hf & RTE_ETH_RSS_IPV6)
1153                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1154
1155        if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1156                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1157
1158        if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1159                cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1160
1161        cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1162        cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1163
1164        /* configuring where to apply the RSS hash */
1165        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1166
1167        /* Writing the key size */
1168        nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1169
1170        return 0;
1171}
1172
1173int
1174nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1175                        struct rte_eth_rss_conf *rss_conf)
1176{
1177        uint32_t update;
1178        uint64_t rss_hf;
1179        struct nfp_net_hw *hw;
1180
1181        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1182
1183        rss_hf = rss_conf->rss_hf;
1184
1185        /* Checking if RSS is enabled */
1186        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
1187                if (rss_hf != 0) { /* Enable RSS? */
1188                        PMD_DRV_LOG(ERR, "RSS unsupported");
1189                        return -EINVAL;
1190                }
1191                return 0; /* Nothing to do */
1192        }
1193
1194        if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1195                PMD_DRV_LOG(ERR, "hash key too long");
1196                return -EINVAL;
1197        }
1198
1199        nfp_net_rss_hash_write(dev, rss_conf);
1200
1201        update = NFP_NET_CFG_UPDATE_RSS;
1202
1203        if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
1204                return -EIO;
1205
1206        return 0;
1207}
1208
1209int
1210nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1211                          struct rte_eth_rss_conf *rss_conf)
1212{
1213        uint64_t rss_hf;
1214        uint32_t cfg_rss_ctrl;
1215        uint8_t key;
1216        int i;
1217        struct nfp_net_hw *hw;
1218
1219        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1220
1221        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
1222                return -EINVAL;
1223
1224        rss_hf = rss_conf->rss_hf;
1225        cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1226
1227        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
1228                rss_hf |= RTE_ETH_RSS_IPV4;
1229
1230        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
1231                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1232
1233        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
1234                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1235
1236        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
1237                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1238
1239        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
1240                rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1241
1242        if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
1243                rss_hf |= RTE_ETH_RSS_IPV6;
1244
1245        /* Propagate current RSS hash functions to caller */
1246        rss_conf->rss_hf = rss_hf;
1247
1248        /* Reading the key size */
1249        rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1250
1251        /* Reading the key byte by byte */
1252        for (i = 0; i < rss_conf->rss_key_len; i++) {
1253                key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1254                memcpy(&rss_conf->rss_key[i], &key, 1);
1255        }
1256
1257        return 0;
1258}
1259
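/*
 * Default RSS setup: spread the configured RX queues round-robin over the
 * 128 redirection table entries and program the hash configuration taken
 * from dev_conf.
 */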
1260int
1261nfp_net_rss_config_default(struct rte_eth_dev *dev)
1262{
1263        struct rte_eth_conf *dev_conf;
1264        struct rte_eth_rss_conf rss_conf;
1265        struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1266        uint16_t rx_queues = dev->data->nb_rx_queues;
1267        uint16_t queue;
1268        int i, j, ret;
1269
1270        PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
1271                rx_queues);
1272
1273        nfp_reta_conf[0].mask = ~0x0;
1274        nfp_reta_conf[1].mask = ~0x0;
1275
1276        queue = 0;
1277        for (i = 0; i < 0x40; i += 8) {
1278                for (j = i; j < (i + 8); j++) {
1279                        nfp_reta_conf[0].reta[j] = queue;
1280                        nfp_reta_conf[1].reta[j] = queue++;
1281                        queue %= rx_queues;
1282                }
1283        }
1284        ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1285        if (ret != 0)
1286                return ret;
1287
1288        dev_conf = &dev->data->dev_conf;
1289        if (!dev_conf) {
1290                PMD_DRV_LOG(INFO, "wrong rss conf");
1291                return -EINVAL;
1292        }
1293        rss_conf = dev_conf->rx_adv_conf.rss_conf;
1294
1295        ret = nfp_net_rss_hash_write(dev, &rss_conf);
1296
1297        return ret;
1298}
1299
1300RTE_LOG_REGISTER_SUFFIX(nfp_logtype_init, init, NOTICE);
1301RTE_LOG_REGISTER_SUFFIX(nfp_logtype_driver, driver, NOTICE);
1302/*
1303 * Local variables:
1304 * c-file-style: "Linux"
1305 * indent-tabs-mode: t
1306 * End:
1307 */
1308