/* dpdk/drivers/net/txgbe/txgbe_pf.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
   3 * Copyright(c) 2010-2017 Intel Corporation
   4 */
   5
   6#include <stdio.h>
   7#include <errno.h>
   8#include <stdint.h>
   9#include <stdlib.h>
  10#include <unistd.h>
  11#include <stdarg.h>
  12#include <inttypes.h>
  13
  14#include <rte_interrupts.h>
  15#include <rte_log.h>
  16#include <rte_debug.h>
  17#include <rte_eal.h>
  18#include <rte_ether.h>
  19#include <ethdev_driver.h>
  20#include <rte_memcpy.h>
  21#include <rte_malloc.h>
  22#include <rte_random.h>
  23#include <rte_bus_pci.h>
  24
  25#include "base/txgbe.h"
  26#include "txgbe_ethdev.h"
  27#include "rte_pmd_txgbe.h"
  28
  29#define TXGBE_MAX_VFTA     (128)
  30#define TXGBE_VF_MSG_SIZE_DEFAULT 1
  31#define TXGBE_VF_GET_QUEUE_MSG_SIZE 5
  32
  33static inline uint16_t
  34dev_num_vf(struct rte_eth_dev *eth_dev)
  35{
  36        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
  37
  38        return pci_dev->max_vfs;
  39}
  40
  41static inline
  42int txgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
  43{
  44        unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
  45        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
  46        uint16_t vfn;
  47
  48        for (vfn = 0; vfn < vf_num; vfn++) {
  49                rte_eth_random_addr(vf_mac_addr);
  50                /* keep the random address as default */
  51                memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
  52                           RTE_ETHER_ADDR_LEN);
  53        }
  54
  55        return 0;
  56}
  57
  58static inline int
  59txgbe_mb_intr_setup(struct rte_eth_dev *dev)
  60{
  61        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
  62
  63        intr->mask_misc |= TXGBE_ICRMISC_VFMBX;
  64
  65        return 0;
  66}
  67
/*
 * Allocate and initialize PF-side state for serving SR-IOV VFs: the
 * per-VF info array, a switch domain, mirror/UTA bookkeeping, the
 * SR-IOV pool/queue layout, random VF MAC addresses, mailbox
 * parameters and the mailbox interrupt mask.
 *
 * Returns 0 on success (including the no-VF case), -ENOMEM or the
 * switch-domain allocation error otherwise.
 */
int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
        struct txgbe_vf_info **vfinfo = TXGBE_DEV_VFDATA(eth_dev);
        struct txgbe_mirror_info *mirror_info = TXGBE_DEV_MR_INFO(eth_dev);
        struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(eth_dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        uint16_t vf_num;
        uint8_t nb_queue;
        int ret = 0;

        PMD_INIT_FUNC_TRACE();

        RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
        /* nothing to do when no VFs are configured on the PCI device */
        vf_num = dev_num_vf(eth_dev);
        if (vf_num == 0)
                return ret;

        *vfinfo = rte_zmalloc("vf_info",
                        sizeof(struct txgbe_vf_info) * vf_num, 0);
        if (*vfinfo == NULL) {
                PMD_INIT_LOG(ERR,
                        "Cannot allocate memory for private VF data\n");
                return -ENOMEM;
        }

        ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
        if (ret) {
                PMD_INIT_LOG(ERR,
                        "failed to allocate switch domain for device %d", ret);
                /* roll back the vfinfo allocation on failure */
                rte_free(*vfinfo);
                *vfinfo = NULL;
                return ret;
        }

        memset(mirror_info, 0, sizeof(struct txgbe_mirror_info));
        memset(uta_info, 0, sizeof(struct txgbe_uta_info));
        hw->mac.mc_filter_type = 0;

        /*
         * Pick the pool tier from the VF count: more VFs means fewer
         * queues per pool (64/32/16 pools -> 2/4/8 queues per pool).
         */
        if (vf_num >= ETH_32_POOLS) {
                nb_queue = 2;
                RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
        } else if (vf_num >= ETH_16_POOLS) {
                nb_queue = 4;
                RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
        } else {
                nb_queue = 8;
                RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
        }

        RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
        /* the PF itself uses the pool right after the last VF */
        RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
        RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx =
                        (uint16_t)(vf_num * nb_queue);

        txgbe_vf_perm_addr_gen(eth_dev, vf_num);

        /* init_mailbox_params */
        hw->mbx.init_params(hw);

        /* set mb interrupt mask */
        txgbe_mb_intr_setup(eth_dev);

        return ret;
}
 132
 133void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
 134{
 135        struct txgbe_vf_info **vfinfo;
 136        uint16_t vf_num;
 137        int ret;
 138
 139        PMD_INIT_FUNC_TRACE();
 140
 141        RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
 142        RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
 143        RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
 144        RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
 145
 146        vf_num = dev_num_vf(eth_dev);
 147        if (vf_num == 0)
 148                return;
 149
 150        vfinfo = TXGBE_DEV_VFDATA(eth_dev);
 151        if (*vfinfo == NULL)
 152                return;
 153
 154        ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
 155        if (ret)
 156                PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
 157
 158        rte_free(*vfinfo);
 159        *vfinfo = NULL;
 160}
 161
 162static void
 163txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 164{
 165        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 166        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
 167        uint16_t vf_num;
 168        int i;
 169        struct txgbe_ethertype_filter ethertype_filter;
 170
 171        if (!hw->mac.set_ethertype_anti_spoofing) {
 172                PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
 173                return;
 174        }
 175
 176        i = txgbe_ethertype_filter_lookup(filter_info,
 177                                          TXGBE_ETHERTYPE_FLOW_CTRL);
 178        if (i >= 0) {
 179                PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
 180                return;
 181        }
 182
 183        ethertype_filter.ethertype = TXGBE_ETHERTYPE_FLOW_CTRL;
 184        ethertype_filter.etqf = TXGBE_ETFLT_ENA |
 185                                TXGBE_ETFLT_TXAS |
 186                                TXGBE_ETHERTYPE_FLOW_CTRL;
 187        ethertype_filter.etqs = 0;
 188        ethertype_filter.conf = TRUE;
 189        i = txgbe_ethertype_filter_insert(filter_info,
 190                                          &ethertype_filter);
 191        if (i < 0) {
 192                PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
 193                return;
 194        }
 195
 196        wr32(hw, TXGBE_ETFLT(i),
 197                        (TXGBE_ETFLT_ENA |
 198                        TXGBE_ETFLT_TXAS |
 199                        TXGBE_ETHERTYPE_FLOW_CTRL));
 200
 201        vf_num = dev_num_vf(eth_dev);
 202        for (i = 0; i < vf_num; i++)
 203                hw->mac.set_ethertype_anti_spoofing(hw, true, i);
 204}
 205
/*
 * Program PF-side hardware for SR-IOV operation: default pool, per-pool
 * RX/TX enables, VMDq RAR mapping, VT mode (must match GPIE), VLAN
 * filtering, MAC anti-spoofing and flow-control thresholds.
 *
 * Returns 0 on success, -1 when no VFs are configured.
 */
int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
        uint32_t vtctl, fcrth;
        uint32_t vfre_slot, vfre_offset;
        uint16_t vf_num;
        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
        const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        uint32_t gpie;
        uint32_t gcr_ext;
        uint32_t vlanctrl;
        int i;

        vf_num = dev_num_vf(eth_dev);
        if (vf_num == 0)
                return -1;

        /* enable VMDq and set the default pool for PF */
        vtctl = rd32(hw, TXGBE_POOLCTL);
        vtctl &= ~TXGBE_POOLCTL_DEFPL_MASK;
        vtctl |= TXGBE_POOLCTL_DEFPL(RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
        vtctl |= TXGBE_POOLCTL_RPLEN;
        wr32(hw, TXGBE_POOLCTL, vtctl);

        /* split the first PF pool index into a 32-bit slot + bit offset */
        vfre_offset = vf_num & VFRE_MASK;
        vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

        /* Enable pools reserved to PF only.
         * In the PF's slot, bits >= vfre_offset are PF pools; the other
         * slot is all-ones when the PF slot is slot 1 is not used
         * (vfre_slot - 1 wraps to 0xffffffff when vfre_slot == 0,
         * and is 0 when vfre_slot == 1).
         */
        wr32(hw, TXGBE_POOLRXENA(vfre_slot), (~0U) << vfre_offset);
        wr32(hw, TXGBE_POOLRXENA(vfre_slot ^ 1), vfre_slot - 1);
        wr32(hw, TXGBE_POOLTXENA(vfre_slot), (~0U) << vfre_offset);
        wr32(hw, TXGBE_POOLTXENA(vfre_slot ^ 1), vfre_slot - 1);

        /* enable loopback so PF<->VF traffic can be switched internally */
        wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);

        /* clear VMDq map to perment rar 0 */
        hw->mac.clear_vmdq(hw, 0, BIT_MASK32);

        /* clear VMDq map to scan rar 127 */
        wr32(hw, TXGBE_ETHADDRIDX, hw->mac.num_rar_entries);
        wr32(hw, TXGBE_ETHADDRASSL, 0);
        wr32(hw, TXGBE_ETHADDRASSH, 0);

        /* set VMDq map to default PF pool */
        hw->mac.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

        /*
         * SW msut set PORTCTL.VT_Mode the same as GPIE.VT_Mode
         */
        gpie = rd32(hw, TXGBE_GPIE);
        gpie |= TXGBE_GPIE_MSIX;
        gcr_ext = rd32(hw, TXGBE_PORTCTL);
        gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;

        /* mirror the pool tier chosen in txgbe_pf_host_init() */
        switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
        case ETH_64_POOLS:
                gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
                break;
        case ETH_32_POOLS:
                gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
                break;
        case ETH_16_POOLS:
                gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
                break;
        }

        wr32(hw, TXGBE_PORTCTL, gcr_ext);
        wr32(hw, TXGBE_GPIE, gpie);

        /*
         * enable vlan filtering and allow all vlan tags through
         */
        vlanctrl = rd32(hw, TXGBE_VLANCTL);
        vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
        wr32(hw, TXGBE_VLANCTL, vlanctrl);

        /* enable all vlan filters */
        for (i = 0; i < TXGBE_MAX_VFTA; i++)
                wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);

        /* Enable MAC Anti-Spoofing */
        hw->mac.set_mac_anti_spoofing(hw, FALSE, vf_num);

        /* set flow control threshold to max to avoid tx switch hang */
        for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
                wr32(hw, TXGBE_FCWTRLO(i), 0);
                fcrth = rd32(hw, TXGBE_PBRXSIZE(i)) - 32;
                wr32(hw, TXGBE_FCWTRHI(i), fcrth);
        }

        txgbe_add_tx_flow_control_drop_filter(eth_dev);

        return 0;
}
 300
/*
 * Apply the PF's receive mode (promiscuous / all-multicast / normal)
 * to both the global PSRCTL filter and the PF pool's POOLETHCTL, then
 * refresh VLAN stripping. The PF pool index equals the VF count.
 */
static void
txgbe_set_rx_mode(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_dev_data *dev_data = eth_dev->data;
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        u32 fctrl, vmolr;
        uint16_t vfn = dev_num_vf(eth_dev);   /* PF's own pool index */

        /* disable store-bad-packets */
        wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_SAVEBAD, 0);

        /* Check for Promiscuous and All Multicast modes */
        fctrl = rd32m(hw, TXGBE_PSRCTL,
                        ~(TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP));
        fctrl |= TXGBE_PSRCTL_BCA |
                 TXGBE_PSRCTL_MCHFENA;

        /* start from POOLETHCTL with all unicast/multicast bits cleared */
        vmolr = rd32m(hw, TXGBE_POOLETHCTL(vfn),
                        ~(TXGBE_POOLETHCTL_UCP |
                          TXGBE_POOLETHCTL_MCP |
                          TXGBE_POOLETHCTL_UCHA |
                          TXGBE_POOLETHCTL_MCHA));
        vmolr |= TXGBE_POOLETHCTL_BCA |
                 TXGBE_POOLETHCTL_UTA |
                 TXGBE_POOLETHCTL_VLA;

        if (dev_data->promiscuous) {
                fctrl |= TXGBE_PSRCTL_UCP |
                         TXGBE_PSRCTL_MCP;
                /* pf don't want packets routing to vf, so clear UPE */
                vmolr |= TXGBE_POOLETHCTL_MCP;
        } else if (dev_data->all_multicast) {
                fctrl |= TXGBE_PSRCTL_MCP;
                vmolr |= TXGBE_POOLETHCTL_MCP;
        } else {
                /* normal mode: hash-based unicast and multicast accept */
                vmolr |= TXGBE_POOLETHCTL_UCHA;
                vmolr |= TXGBE_POOLETHCTL_MCHA;
        }

        wr32(hw, TXGBE_POOLETHCTL(vfn), vmolr);

        wr32(hw, TXGBE_PSRCTL, fctrl);

        txgbe_vlan_hw_strip_config(eth_dev);
}
 346
/*
 * React to a VF function-level reset: restore the VF pool's default
 * ethertype controls and tag, drop its multicast state, refresh the
 * PF rx mode and clear the VF's dedicated RAR entry.
 */
static inline void
txgbe_vf_reset_event(struct rte_eth_dev *eth_dev, uint16_t vf)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
        /* RAR entries are handed out from the top: VF n gets entry
         * num_rar_entries - (n + 1)
         */
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint32_t vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));

        vmolr |= (TXGBE_POOLETHCTL_UCHA |
                        TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_UTA);
        wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);

        wr32(hw, TXGBE_POOLTAG(vf), 0);

        /* reset multicast table array for vf */
        vfinfo[vf].num_vf_mc_hashes = 0;

        /* reset rx mode */
        txgbe_set_rx_mode(eth_dev);

        hw->mac.clear_rar(hw, rar_entry);
}
 369
 370static inline void
 371txgbe_vf_reset_msg(struct rte_eth_dev *eth_dev, uint16_t vf)
 372{
 373        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 374        uint32_t reg;
 375        uint32_t reg_offset, vf_shift;
 376        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
 377        const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
 378        uint8_t  nb_q_per_pool;
 379        int i;
 380
 381        vf_shift = vf & VFRE_MASK;
 382        reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
 383
 384        /* enable transmit for vf */
 385        reg = rd32(hw, TXGBE_POOLTXENA(reg_offset));
 386        reg |= (reg | (1 << vf_shift));
 387        wr32(hw, TXGBE_POOLTXENA(reg_offset), reg);
 388
 389        /* enable all queue drop for IOV */
 390        nb_q_per_pool = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
 391        for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
 392                txgbe_flush(hw);
 393                reg = 1 << (i % 32);
 394                wr32m(hw, TXGBE_QPRXDROP(i / 32), reg, reg);
 395        }
 396
 397        /* enable receive for vf */
 398        reg = rd32(hw, TXGBE_POOLRXENA(reg_offset));
 399        reg |= (reg | (1 << vf_shift));
 400        wr32(hw, TXGBE_POOLRXENA(reg_offset), reg);
 401
 402        txgbe_vf_reset_event(eth_dev, vf);
 403}
 404
 405static int
 406txgbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf)
 407{
 408        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 409        uint32_t vmolr;
 410
 411        vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
 412
 413        PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);
 414
 415        vmolr &= ~TXGBE_POOLETHCTL_MCP;
 416
 417        wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
 418
 419        return 0;
 420}
 421
 422static int
 423txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
 424{
 425        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 426        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
 427        unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
 428        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 429        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
 430
 431        txgbe_vf_reset_msg(eth_dev, vf);
 432
 433        hw->mac.set_rar(hw, rar_entry, vf_mac, vf, true);
 434
 435        /* Disable multicast promiscuous at reset */
 436        txgbe_disable_vf_mc_promisc(eth_dev, vf);
 437
 438        /* reply to reset with ack and vf mac address */
 439        msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
 440        rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
 441        /*
 442         * Piggyback the multicast filter type so VF can compute the
 443         * correct vectors
 444         */
 445        msgbuf[3] = hw->mac.mc_filter_type;
 446        txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf);
 447
 448        return 0;
 449}
 450
 451static int
 452txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev,
 453                uint32_t vf, uint32_t *msgbuf)
 454{
 455        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 456        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
 457        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 458        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
 459        struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
 460
 461        if (rte_is_valid_assigned_ether_addr(ea)) {
 462                rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
 463                return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
 464        }
 465        return -1;
 466}
 467
 468static int
 469txgbe_vf_set_multicast(struct rte_eth_dev *eth_dev,
 470                uint32_t vf, uint32_t *msgbuf)
 471{
 472        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 473        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
 474        int nb_entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
 475                TXGBE_VT_MSGINFO_SHIFT;
 476        uint16_t *hash_list = (uint16_t *)&msgbuf[1];
 477        uint32_t mta_idx;
 478        uint32_t mta_shift;
 479        const uint32_t TXGBE_MTA_INDEX_MASK = 0x7F;
 480        const uint32_t TXGBE_MTA_BIT_SHIFT = 5;
 481        const uint32_t TXGBE_MTA_BIT_MASK = (0x1 << TXGBE_MTA_BIT_SHIFT) - 1;
 482        uint32_t reg_val;
 483        int i;
 484        u32 vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
 485
 486        /* Disable multicast promiscuous first */
 487        txgbe_disable_vf_mc_promisc(eth_dev, vf);
 488
 489        /* only so many hash values supported */
 490        nb_entries = RTE_MIN(nb_entries, TXGBE_MAX_VF_MC_ENTRIES);
 491
 492        /* store the mc entries  */
 493        vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
 494        for (i = 0; i < nb_entries; i++)
 495                vfinfo->vf_mc_hashes[i] = hash_list[i];
 496
 497        if (nb_entries == 0) {
 498                vmolr &= ~TXGBE_POOLETHCTL_MCHA;
 499                wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
 500                return 0;
 501        }
 502
 503        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
 504                mta_idx = (vfinfo->vf_mc_hashes[i] >> TXGBE_MTA_BIT_SHIFT)
 505                                & TXGBE_MTA_INDEX_MASK;
 506                mta_shift = vfinfo->vf_mc_hashes[i] & TXGBE_MTA_BIT_MASK;
 507                reg_val = rd32(hw, TXGBE_MCADDRTBL(mta_idx));
 508                reg_val |= (1 << mta_shift);
 509                wr32(hw, TXGBE_MCADDRTBL(mta_idx), reg_val);
 510        }
 511
 512        vmolr |= TXGBE_POOLETHCTL_MCHA;
 513        wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
 514
 515        return 0;
 516}
 517
 518static int
 519txgbe_vf_set_vlan(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
 520{
 521        int add, vid;
 522        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 523        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
 524
 525        add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK)
 526                >> TXGBE_VT_MSGINFO_SHIFT;
 527        vid = TXGBE_PSRVLAN_VID(msgbuf[1]);
 528
 529        if (add)
 530                vfinfo[vf].vlan_count++;
 531        else if (vfinfo[vf].vlan_count)
 532                vfinfo[vf].vlan_count--;
 533        return hw->mac.set_vfta(hw, vid, vf, (bool)add, false);
 534}
 535
 536static int
 537txgbe_set_vf_lpe(struct rte_eth_dev *eth_dev,
 538                __rte_unused uint32_t vf, uint32_t *msgbuf)
 539{
 540        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 541        uint32_t max_frame = msgbuf[1];
 542        uint32_t max_frs;
 543
 544        if (max_frame < RTE_ETHER_MIN_LEN ||
 545                        max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
 546                return -1;
 547
 548        max_frs = rd32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK);
 549        if (max_frs < max_frame) {
 550                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
 551                        TXGBE_FRMSZ_MAX(max_frame));
 552        }
 553
 554        return 0;
 555}
 556
 557static int
 558txgbe_negotiate_vf_api(struct rte_eth_dev *eth_dev,
 559                uint32_t vf, uint32_t *msgbuf)
 560{
 561        uint32_t api_version = msgbuf[1];
 562        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
 563
 564        switch (api_version) {
 565        case txgbe_mbox_api_10:
 566        case txgbe_mbox_api_11:
 567        case txgbe_mbox_api_12:
 568        case txgbe_mbox_api_13:
 569                vfinfo[vf].api_version = (uint8_t)api_version;
 570                return 0;
 571        default:
 572                break;
 573        }
 574
 575        PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
 576                api_version, vf);
 577
 578        return -1;
 579}
 580
/*
 * Handle TXGBE_VF_GET_QUEUES: report the VF's RX/TX queue counts, its
 * default queue index and the number of transparent VLAN/DCB traffic
 * classes, derived from the PF's configured TX multi-queue mode.
 * Returns 0 on success, -1 for an unsupported API version or TX mode.
 */
static int
txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
{
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
        uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
        struct rte_eth_conf *eth_conf;
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
        u8 num_tcs;
        struct txgbe_hw *hw;
        u32 vmvir;
        u32 vlana;
        u32 vid;
        u32 user_priority;

        /* Verify if the PF supports the mbox APIs version or not */
        switch (vfinfo[vf].api_version) {
        case txgbe_mbox_api_20:
        case txgbe_mbox_api_11:
        case txgbe_mbox_api_12:
        case txgbe_mbox_api_13:
                break;
        default:
                return -1;
        }

        /* Notify VF of Rx and Tx queue number */
        msgbuf[TXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
        msgbuf[TXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;

        /* Notify VF of default queue */
        msgbuf[TXGBE_VF_DEF_QUEUE] = default_q;

        /* Notify VF of number of DCB traffic classes */
        eth_conf = &eth_dev->data->dev_conf;
        switch (eth_conf->txmode.mq_mode) {
        case ETH_MQ_TX_NONE:
        case ETH_MQ_TX_DCB:
                /* non-virtualized TX modes are invalid while serving VFs */
                PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
                        ", but its tx mode = %d\n", vf,
                        eth_conf->txmode.mq_mode);
                return -1;

        case ETH_MQ_TX_VMDQ_DCB:
                /* TC count follows from the configured pool count */
                vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                switch (vmdq_dcb_tx_conf->nb_queue_pools) {
                case ETH_16_POOLS:
                        num_tcs = ETH_8_TCS;
                        break;
                case ETH_32_POOLS:
                        num_tcs = ETH_4_TCS;
                        break;
                default:
                        return -1;
                }
                break;

        /* ETH_MQ_TX_VMDQ_ONLY,  DCB not enabled */
        case ETH_MQ_TX_VMDQ_ONLY:
                /* one transparent VLAN "TC" if the pool always inserts a
                 * non-zero tag; TXGBD_* is the decode (extract) variant
                 * of the TXGBE_* field macro
                 */
                hw = TXGBE_DEV_HW(eth_dev);
                vmvir = rd32(hw, TXGBE_POOLTAG(vf));
                vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
                vid = vmvir & TXGBE_POOLTAG_VTAG_MASK;
                user_priority =
                        TXGBD_POOLTAG_VTAG_UP(vmvir);
                if (vlana == TXGBE_POOLTAG_ACT_ALWAYS &&
                        (vid !=  0 || user_priority != 0))
                        num_tcs = 1;
                else
                        num_tcs = 0;
                break;

        default:
                PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
                        eth_conf->txmode.mq_mode);
                return -1;
        }
        msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs;

        return 0;
}
 661
 662static int
 663txgbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev,
 664                uint32_t vf, uint32_t *msgbuf)
 665{
 666        struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
 667        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 668        int xcast_mode = msgbuf[1];     /* msgbuf contains the flag to enable */
 669        u32 vmolr, fctrl, disable, enable;
 670
 671        switch (vfinfo[vf].api_version) {
 672        case txgbe_mbox_api_12:
 673                /* promisc introduced in 1.3 version */
 674                if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC)
 675                        return -EOPNOTSUPP;
 676                break;
 677                /* Fall threw */
 678        case txgbe_mbox_api_13:
 679                break;
 680        default:
 681                return -1;
 682        }
 683
 684        if (vfinfo[vf].xcast_mode == xcast_mode)
 685                goto out;
 686
 687        switch (xcast_mode) {
 688        case TXGBEVF_XCAST_MODE_NONE:
 689                disable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
 690                          TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
 691                          TXGBE_POOLETHCTL_VLP;
 692                enable = 0;
 693                break;
 694        case TXGBEVF_XCAST_MODE_MULTI:
 695                disable = TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
 696                          TXGBE_POOLETHCTL_VLP;
 697                enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA;
 698                break;
 699        case TXGBEVF_XCAST_MODE_ALLMULTI:
 700                disable = TXGBE_POOLETHCTL_UCP | TXGBE_POOLETHCTL_VLP;
 701                enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
 702                         TXGBE_POOLETHCTL_MCP;
 703                break;
 704        case TXGBEVF_XCAST_MODE_PROMISC:
 705                fctrl = rd32(hw, TXGBE_PSRCTL);
 706                if (!(fctrl & TXGBE_PSRCTL_UCP)) {
 707                        /* VF promisc requires PF in promisc */
 708                        PMD_DRV_LOG(ERR,
 709                               "Enabling VF promisc requires PF in promisc\n");
 710                        return -1;
 711                }
 712
 713                disable = 0;
 714                enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
 715                         TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
 716                         TXGBE_POOLETHCTL_VLP;
 717                break;
 718        default:
 719                return -1;
 720        }
 721
 722        vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
 723        vmolr &= ~disable;
 724        vmolr |= enable;
 725        wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
 726        vfinfo[vf].xcast_mode = xcast_mode;
 727
 728out:
 729        msgbuf[1] = xcast_mode;
 730
 731        return 0;
 732}
 733
/*
 * Handle TXGBE_VF_SET_MACVLAN: a non-zero index in the message info
 * field adds a secondary MAC filter for the VF; index 0 clears the
 * VF's previously added filter.
 *
 * NOTE(review): mac_count is used directly as the RAR slot number, so
 * low RAR entries are shared across VFs and only the last-added entry
 * is tracked per VF — presumably only one extra MAC per VF is
 * supported; confirm against the base RAR allocation scheme.
 */
static int
txgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vf_info *vf_info = *(TXGBE_DEV_VFDATA(dev));
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
        struct rte_ether_addr *ea = (struct rte_ether_addr *)new_mac;
        int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
                    TXGBE_VT_MSGINFO_SHIFT;

        if (index) {
                /* reject zero/multicast addresses */
                if (!rte_is_valid_assigned_ether_addr(ea)) {
                        PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
                        return -1;
                }

                vf_info[vf].mac_count++;

                hw->mac.set_rar(hw, vf_info[vf].mac_count,
                                new_mac, vf, true);
        } else {
                /* index 0: remove the filter added above, if any */
                if (vf_info[vf].mac_count) {
                        hw->mac.clear_rar(hw, vf_info[vf].mac_count);
                        vf_info[vf].mac_count = 0;
                }
        }
        return 0;
}
 762
/*
 * Read one mailbox message from @vf, let the application veto it via
 * the RTE_ETH_EVENT_VF_MBOX callback, dispatch it to the matching
 * handler, and reply with ACK (handler returned 0) or NACK (non-zero),
 * always with the clear-to-send flag set.
 *
 * Returns the mailbox read error, the handler's result, or
 * TXGBE_ERR_MBX for an unknown message type.
 */
static int
txgbe_rcv_msg_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
{
        uint16_t mbx_size = TXGBE_P2VMBX_SIZE;
        uint16_t msg_size = TXGBE_VF_MSG_SIZE_DEFAULT;
        uint32_t msgbuf[TXGBE_P2VMBX_SIZE];
        int32_t retval;
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
        struct rte_pmd_txgbe_mb_event_param ret_param;

        retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf);
        if (retval) {
                PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
                return retval;
        }

        /* do nothing with the message already been processed */
        if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK))
                return retval;

        /* flush the ack before we write any messages back */
        txgbe_flush(hw);

        /**
         * initialise structure to send to user application
         * will return response from user in retval field
         */
        ret_param.retval = RTE_PMD_TXGBE_MB_EVENT_PROCEED;
        ret_param.vfid = vf;
        ret_param.msg_type = msgbuf[0] & 0xFFFF;
        ret_param.msg = (void *)msgbuf;

        /* perform VF reset */
        if (msgbuf[0] == TXGBE_VF_RESET) {
                int ret = txgbe_vf_reset(eth_dev, vf, msgbuf);

                /* from now on this VF's messages are trusted */
                vfinfo[vf].clear_to_send = true;

                /* notify application about VF reset */
                rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
                                              &ret_param);
                return ret;
        }

        /**
         * ask user application if we allowed to perform those functions
         * if we get ret_param.retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED
         * then business as usual,
         * if 0, do nothing and send ACK to VF
         * if ret_param.retval > 1, do nothing and send NAK to VF
         */
        rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
                                      &ret_param);

        retval = ret_param.retval;

        /* check & process VF to PF mailbox message */
        switch ((msgbuf[0] & 0xFFFF)) {
        case TXGBE_VF_SET_MAC_ADDR:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_vf_set_mac_addr(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_SET_MULTICAST:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_vf_set_multicast(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_SET_LPE:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_set_vf_lpe(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_SET_VLAN:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_vf_set_vlan(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_API_NEGOTIATE:
                /* always handled, regardless of the application verdict */
                retval = txgbe_negotiate_vf_api(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_GET_QUEUES:
                retval = txgbe_get_vf_queues(eth_dev, vf, msgbuf);
                /* reply carries queue info, not just the status word */
                msg_size = TXGBE_VF_GET_QUEUE_MSG_SIZE;
                break;
        case TXGBE_VF_UPDATE_XCAST_MODE:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_set_vf_mc_promisc(eth_dev, vf, msgbuf);
                break;
        case TXGBE_VF_SET_MACVLAN:
                if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
                        retval = txgbe_set_vf_macvlan_msg(eth_dev, vf, msgbuf);
                break;
        default:
                PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (uint32_t)msgbuf[0]);
                retval = TXGBE_ERR_MBX;
                break;
        }

        /* response the VF according to the message process result */
        if (retval)
                msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK;
        else
                msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK;

        msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS;

        txgbe_write_mbx(hw, msgbuf, msg_size, vf);

        return retval;
}
 871
 872static inline void
 873txgbe_rcv_ack_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
 874{
 875        uint32_t msg = TXGBE_VT_MSGTYPE_NACK;
 876        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 877        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
 878
 879        if (!vfinfo[vf].clear_to_send)
 880                txgbe_write_mbx(hw, &msg, 1, vf);
 881}
 882
 883void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
 884{
 885        uint16_t vf;
 886        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
 887
 888        for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
 889                /* check & process vf function level reset */
 890                if (!txgbe_check_for_rst(hw, vf))
 891                        txgbe_vf_reset_event(eth_dev, vf);
 892
 893                /* check & process vf mailbox messages */
 894                if (!txgbe_check_for_msg(hw, vf))
 895                        txgbe_rcv_msg_from_vf(eth_dev, vf);
 896
 897                /* check & process acks from vf */
 898                if (!txgbe_check_for_ack(hw, vf))
 899                        txgbe_rcv_ack_from_vf(eth_dev, vf);
 900        }
 901}
 902