dpdk/drivers/net/ixgbe/ixgbe_pf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MAX_VFTA     (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808

static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        return pci_dev->max_vfs;
}

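/*
 * Assign a random MAC address to every VF and keep it in the per-VF data
 * as that VF's default (permanent) address.
 */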
static inline int
ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
        unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        uint16_t vfn;

        for (vfn = 0; vfn < vf_num; vfn++) {
                rte_eth_random_addr(vf_mac_addr);
                /* keep the random address as default */
                memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
                           RTE_ETHER_ADDR_LEN);
        }

        return 0;
}

static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

        intr->mask |= IXGBE_EICR_MAILBOX;

        return 0;
}

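/*
 * PF side of SR-IOV init: allocate the per-VF data array and a switch
 * domain, pick the pool layout from the VF count (64/32/16 pools with
 * 2/4/8 queues per pool), generate the VF MAC addresses and set up the
 * PF/VF mailbox and its interrupt mask.
 */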
int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_vf_info **vfinfo =
                IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        uint16_t vf_num;
        uint8_t nb_queue;
        int ret = 0;

        PMD_INIT_FUNC_TRACE();

        RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
        vf_num = dev_num_vf(eth_dev);
        if (vf_num == 0)
                return ret;

        *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
        if (*vfinfo == NULL) {
                PMD_INIT_LOG(ERR,
                        "Cannot allocate memory for private VF data");
                return -ENOMEM;
        }

        ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
        if (ret) {
                PMD_INIT_LOG(ERR,
                        "failed to allocate switch domain: %d", ret);
                rte_free(*vfinfo);
                *vfinfo = NULL;
                return ret;
        }

        memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
        hw->mac.mc_filter_type = 0;

        if (vf_num >= RTE_ETH_32_POOLS) {
                nb_queue = 2;
                RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
        } else if (vf_num >= RTE_ETH_16_POOLS) {
                nb_queue = 4;
                RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
        } else {
                nb_queue = 8;
                RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
        }

        RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
        RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
        RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

        ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* set mb interrupt mask */
        ixgbe_mb_intr_setup(eth_dev);

        return ret;
}

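/*
 * Undo ixgbe_pf_host_init(): clear the SR-IOV fields of the ethdev, free
 * the switch domain and release the per-VF data array.
 */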
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_vf_info **vfinfo;
        uint16_t vf_num;
        int ret;

        PMD_INIT_FUNC_TRACE();

        RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
        RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
        RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
        RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;

        vf_num = dev_num_vf(eth_dev);
        if (vf_num == 0)
                return;

        vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
        if (*vfinfo == NULL)
                return;

        ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
        if (ret)
                PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);

        rte_free(*vfinfo);
        *vfinfo = NULL;
}

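/*
 * Install an ethertype filter for the flow control ethertype (0x8808)
 * with the Tx anti-spoofing bit set, and enable ethertype anti-spoofing
 * for every VF, so pause frames forged by a VF are dropped.
 */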
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        uint16_t vf_num;
        int i;
        struct ixgbe_ethertype_filter ethertype_filter;

        if (!hw->mac.ops.set_ethertype_anti_spoofing) {
                PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
                return;
        }

        i = ixgbe_ethertype_filter_lookup(filter_info,
                                          IXGBE_ETHERTYPE_FLOW_CTRL);
        if (i >= 0) {
                PMD_DRV_LOG(ERR, "An ether type filter entity for flow control already exists!\n");
                return;
        }

        ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
        ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
                                IXGBE_ETQF_TX_ANTISPOOF |
                                IXGBE_ETHERTYPE_FLOW_CTRL;
        ethertype_filter.etqs = 0;
        ethertype_filter.conf = TRUE;
        i = ixgbe_ethertype_filter_insert(filter_info,
                                          &ethertype_filter);
        if (i < 0) {
                PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
                return;
        }

        IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
                        (IXGBE_ETQF_FILTER_EN |
                        IXGBE_ETQF_TX_ANTISPOOF |
                        IXGBE_ETHERTYPE_FLOW_CTRL));

        vf_num = dev_num_vf(eth_dev);
        for (i = 0; i < vf_num; i++)
                hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}

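/*
 * Program the hardware for SR-IOV operation: enable VMDq with the PF
 * default pool, open Rx/Tx only on the pools reserved to the PF, enable
 * VMDq loopback, keep GCR_EXT.VT_Mode in sync with GPIE.VT_Mode, enable
 * VLAN filtering with an all-pass VFTA, configure MAC anti-spoofing and
 * flow control thresholds, and add the pause-frame drop filter above.
 */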
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
        uint32_t vtctl, fcrth;
        uint32_t vfre_slot, vfre_offset;
        uint16_t vf_num;
        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
        const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        uint32_t gpie, gcr_ext;
        uint32_t vlanctrl;
        int i;

        vf_num = dev_num_vf(eth_dev);
        if (vf_num == 0)
                return -1;

        /* enable VMDq and set the default pool for PF */
        vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
        vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
        vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
                << IXGBE_VT_CTL_POOL_SHIFT;
        vtctl |= IXGBE_VT_CTL_REPLEN;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

        vfre_offset = vf_num & VFRE_MASK;
        vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

        /* Enable pools reserved to PF only */
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

        /* PFDMA Tx General Switch Control Enables VMDQ loopback */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

        /* clear VMDq map to permanent rar 0 */
        hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

        /* clear VMDq map to scan rar 127 */
        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

        /* set VMDq map to default PF pool */
        hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

        /*
         * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
         */
        gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

        switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
        case RTE_ETH_64_POOLS:
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
                gpie |= IXGBE_GPIE_VTMODE_64;
                break;
        case RTE_ETH_32_POOLS:
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
                gpie |= IXGBE_GPIE_VTMODE_32;
                break;
        case RTE_ETH_16_POOLS:
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
                gpie |= IXGBE_GPIE_VTMODE_16;
                break;
        }

        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /*
         * enable vlan filtering and allow all vlan tags through
         */
        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

        /* VFTA - enable all vlan filters */
        for (i = 0; i < IXGBE_MAX_VFTA; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

        /* Enable MAC Anti-Spoofing */
        hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

        /* set flow control threshold to max to avoid tx switch hang */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
        }

        ixgbe_add_tx_flow_control_drop_filter(eth_dev);

        return 0;
}

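/*
 * Derive the PF receive mode (FCTRL) and the PF pool's VMOLR bits from
 * the device's promiscuous/all-multicast state, then refresh the VLAN
 * strip configuration.
 */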
static void
set_rx_mode(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *dev_data = dev->data;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
        uint16_t vfn = dev_num_vf(dev);

        /* Check for Promiscuous and All Multicast modes */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
        fctrl |= IXGBE_FCTRL_BAM;

        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

        if (dev_data->promiscuous) {
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
        } else {
                if (dev_data->all_multicast) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                } else {
                        vmolr |= IXGBE_VMOLR_ROMPE;
                }
        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
                         ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
                           IXGBE_VMOLR_ROPE);
                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
        }

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        ixgbe_vlan_hw_strip_config(dev);
}

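/*
 * Common VF reset housekeeping: restore the default VMOLR bits of the
 * VF's pool, clear its Tx VLAN insertion (VMVIR), forget its multicast
 * hashes, refresh the global Rx mode and clear the RAR entry reserved
 * for the VF.
 */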
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        vmolr |= (IXGBE_VMOLR_ROPE |
                        IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

        /* reset multicast table array for vf */
        vfinfo[vf].num_vf_mc_hashes = 0;

        /* reset rx mode */
        set_rx_mode(dev);

        hw->mac.ops.clear_rar(hw, rar_entry);
}

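/*
 * Complete a VF reset request: re-enable Tx (VFTE) and Rx (VFRE) for the
 * VF's pool, set drop-enable on all of its queues, count its spoofed
 * packets in SSVPC, then run the common reset housekeeping above.
 */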
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t reg;
        uint32_t reg_offset, vf_shift;
        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
        const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
        uint8_t  nb_q_per_pool;
        int i;

        vf_shift = vf & VFRE_MASK;
        reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

        /* enable transmit for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

        /* enable all queue drop for IOV */
        nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
                IXGBE_WRITE_FLUSH(hw);
                reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
                reg |= i << IXGBE_QDE_IDX_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
        }

        /* enable receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

        /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= (1 << vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

        ixgbe_vf_reset_event(dev, vf);
}

static int
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vmolr;

        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);

        vmolr &= ~IXGBE_VMOLR_MPE;

        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

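/*
 * Handle the IXGBE_VF_RESET mailbox message: reset the VF, program its
 * permanent MAC address into the RAR entry reserved for it, disable
 * multicast promiscuous mode, and reply with ACK, the MAC address and
 * the multicast filter type.
 */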
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

        ixgbe_vf_reset_msg(dev, vf);

        hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

        /* Disable multicast promiscuous at reset */
        ixgbe_disable_vf_mc_promisc(dev, vf);

        /* reply to reset with ack and vf mac address */
        msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
        rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
        /*
         * Piggyback the multicast filter type so VF can compute the
         * correct vectors
         */
        msgbuf[3] = hw->mac.mc_filter_type;
        ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

        return 0;
}

static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

        if (rte_is_valid_assigned_ether_addr(
                        (struct rte_ether_addr *)new_mac)) {
                rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
                           RTE_ETHER_ADDR_LEN);
                return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
        }
        return -1;
}

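/*
 * Handle IXGBE_VF_SET_MULTICAST: store the VF's 12-bit multicast hash
 * values and set the matching MTA bits (bits [11:5] select one of the
 * 128 MTA registers, bits [4:0] the bit within it). ROMPE is cleared
 * when the list is empty and set otherwise.
 */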
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                IXGBE_VT_MSGINFO_SHIFT;
        uint16_t *hash_list = (uint16_t *)&msgbuf[1];
        uint32_t mta_idx;
        uint32_t mta_shift;
        const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
        const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
        const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
        uint32_t reg_val;
        int i;
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        /* Disable multicast promiscuous first */
        ixgbe_disable_vf_mc_promisc(dev, vf);

        /* only so many hash values supported */
        nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

        /* store the mc entries */
        vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
        for (i = 0; i < nb_entries; i++)
                vfinfo[vf].vf_mc_hashes[i] = hash_list[i];

        if (nb_entries == 0) {
                vmolr &= ~IXGBE_VMOLR_ROMPE;
                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
                return 0;
        }

        for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
                mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
                                & IXGBE_MTA_INDEX_MASK;
                mta_shift = vfinfo[vf].vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
                reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
                reg_val |= (1 << mta_shift);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
        }

        vmolr |= IXGBE_VMOLR_ROMPE;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        int add, vid;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

        add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                >> IXGBE_VT_MSGINFO_SHIFT;
        vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

        if (add)
                vfinfo[vf].vlan_count++;
        else if (vfinfo[vf].vlan_count)
                vfinfo[vf].vlan_count--;
        return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}

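/*
 * Handle IXGBE_VF_SET_LPE (max frame size). X540/X550 accept jumbo
 * frames in IOV mode for any VF API version; on older MACs jumbo frames
 * are only accepted from API 1.1+ VFs while the PF itself runs with a
 * jumbo MTU. Grows MAXFRS and toggles HLREG0.JUMBOEN when the request
 * exceeds the current hardware limit.
 */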
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t max_frame = msgbuf[1];
        uint32_t max_frs;
        uint32_t hlreg0;

        /* X540 and X550 support jumbo frames in IOV mode */
        if (hw->mac.type != ixgbe_mac_X540 &&
                hw->mac.type != ixgbe_mac_X550 &&
                hw->mac.type != ixgbe_mac_X550EM_x &&
                hw->mac.type != ixgbe_mac_X550EM_a) {
                struct ixgbe_vf_info *vfinfo =
                        *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

                switch (vfinfo[vf].api_version) {
                case ixgbe_mbox_api_11:
                case ixgbe_mbox_api_12:
                case ixgbe_mbox_api_13:
                        /**
                         * Versions 1.1, 1.2 and 1.3 support jumbo frames on
                         * VFs if the PF has jumbo frames enabled, which means
                         * legacy VFs are disabled.
                         */
                        if (dev->data->mtu > RTE_ETHER_MTU)
                                break;
                        /* fall through */
                default:
                        /**
                         * If the PF or VF is running with jumbo frames
                         * enabled, we return -1 as we cannot support jumbo
                         * frames on legacy VFs.
                         */
                        if (max_frame > IXGBE_ETH_MAX_LEN ||
                                        dev->data->mtu > RTE_ETHER_MTU)
                                return -1;
                        break;
                }
        }

        if (max_frame < RTE_ETHER_MIN_LEN ||
                        max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
                return -1;

        max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
                   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
        if (max_frs < max_frame) {
                hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
                if (max_frame > IXGBE_ETH_MAX_LEN)
                        hlreg0 |= IXGBE_HLREG0_JUMBOEN;
                else
                        hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
                IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

                max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }

        return 0;
}

static int
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        uint32_t api_version = msgbuf[1];
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

        switch (api_version) {
        case ixgbe_mbox_api_10:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
                vfinfo[vf].api_version = (uint8_t)api_version;
                return 0;
        default:
                break;
        }

        PMD_DRV_LOG(ERR, "Negotiated invalid API version %u from VF %d\n",
                api_version, vf);

        return -1;
}

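/*
 * Handle IXGBE_VF_GET_QUEUES: report the VF's Rx/Tx queue count, its
 * default queue index and the number of traffic classes, derived from
 * the PF Tx multi-queue mode (VMDq+DCB pool count, or the VMVIR default
 * VLAN setting in plain VMDq mode).
 */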
static int
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        struct rte_eth_conf *eth_conf;
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
        u8 num_tcs;
        struct ixgbe_hw *hw;
        u32 vmvir;
#define IXGBE_VMVIR_VLANA_MASK          0xC0000000
#define IXGBE_VMVIR_VLAN_VID_MASK       0x00000FFF
#define IXGBE_VMVIR_VLAN_UP_MASK        0x0000E000
#define VLAN_PRIO_SHIFT                 13
        u32 vlana;
        u32 vid;
        u32 user_priority;

        /* Verify if the PF supports the mbox APIs version or not */
        switch (vfinfo[vf].api_version) {
        case ixgbe_mbox_api_20:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
                break;
        default:
                return -1;
        }

        /* Notify VF of Rx and Tx queue number */
        msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

        /* Notify VF of default queue */
        msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;

        /* Notify VF of number of DCB traffic classes */
        eth_conf = &dev->data->dev_conf;
        switch (eth_conf->txmode.mq_mode) {
        case RTE_ETH_MQ_TX_NONE:
        case RTE_ETH_MQ_TX_DCB:
                PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
                        ", but its tx mode = %d\n", vf,
                        eth_conf->txmode.mq_mode);
                return -1;

        case RTE_ETH_MQ_TX_VMDQ_DCB:
                vmdq_dcb_tx_conf = &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
                switch (vmdq_dcb_tx_conf->nb_queue_pools) {
                case RTE_ETH_16_POOLS:
                        num_tcs = RTE_ETH_8_TCS;
                        break;
                case RTE_ETH_32_POOLS:
                        num_tcs = RTE_ETH_4_TCS;
                        break;
                default:
                        return -1;
                }
                break;

        /* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
        case RTE_ETH_MQ_TX_VMDQ_ONLY:
                hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
                vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
                vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
                vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
                user_priority =
                        (vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
                if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
                        ((vid != 0) || (user_priority != 0)))
                        num_tcs = 1;
                else
                        num_tcs = 0;
                break;

        default:
                PMD_DRV_LOG(ERR, "PF works with invalid mode = %d\n",
                        eth_conf->txmode.mq_mode);
                return -1;
        }
        msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;

        return 0;
}

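/*
 * Handle IXGBE_VF_UPDATE_XCAST_MODE: map the requested mode (none,
 * multi, allmulti or promisc) onto the VMOLR bits of the VF's pool.
 * Promiscuous mode requires API 1.3, a MAC newer than 82599 and the PF
 * itself in promiscuous mode.
 */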
static int
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int xcast_mode = msgbuf[1];     /* msgbuf contains the flag to enable */
        u32 vmolr, fctrl, disable, enable;

        switch (vfinfo[vf].api_version) {
        case ixgbe_mbox_api_12:
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
                break;
        case ixgbe_mbox_api_13:
                break;
        default:
                return -1;
        }

        if (vfinfo[vf].xcast_mode == xcast_mode)
                goto out;

        switch (xcast_mode) {
        case IXGBEVF_XCAST_MODE_NONE:
                disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                          IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = 0;
                break;
        case IXGBEVF_XCAST_MODE_MULTI:
                disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
                break;
        case IXGBEVF_XCAST_MODE_ALLMULTI:
                disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
                break;
        case IXGBEVF_XCAST_MODE_PROMISC:
                if (hw->mac.type <= ixgbe_mac_82599EB)
                        return -1;

                fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                if (!(fctrl & IXGBE_FCTRL_UPE)) {
                        /* VF promisc requires PF in promisc */
                        PMD_DRV_LOG(ERR,
                               "Enabling VF promisc requires PF in promisc\n");
                        return -1;
                }

                disable = 0;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                         IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                break;
        default:
                return -1;
        }

        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
        vmolr &= ~disable;
        vmolr |= enable;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
        vfinfo[vf].xcast_mode = xcast_mode;

out:
        msgbuf[1] = xcast_mode;

        return 0;
}

static int
ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vf_info =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
        int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                    IXGBE_VT_MSGINFO_SHIFT;

        if (index) {
                if (!rte_is_valid_assigned_ether_addr(
                        (struct rte_ether_addr *)new_mac)) {
                        PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
                        return -1;
                }

                vf_info[vf].mac_count++;

                hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
                                new_mac, vf, IXGBE_RAH_AV);
        } else {
                if (vf_info[vf].mac_count) {
                        hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
                        vf_info[vf].mac_count = 0;
                }
        }
        return 0;
}

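/*
 * Read one mailbox message from a VF, let the application filter it via
 * the RTE_ETH_EVENT_VF_MBOX callback, dispatch it to the handler above
 * that matches its type, and ACK or NACK the VF based on the result.
 */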
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
        uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
        uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
        uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
        int32_t retval;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        struct rte_pmd_ixgbe_mb_event_param ret_param;

        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
        if (retval) {
                PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
                return retval;
        }

        /* do nothing if the message has already been processed */
        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
                return retval;

        /* flush the ack before we write any messages back */
        IXGBE_WRITE_FLUSH(hw);

        /**
         * initialise structure to send to user application
         * will return response from user in retval field
         */
        ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
        ret_param.vfid = vf;
        ret_param.msg_type = msgbuf[0] & 0xFFFF;
        ret_param.msg = (void *)msgbuf;

        /* perform VF reset */
        if (msgbuf[0] == IXGBE_VF_RESET) {
                int ret = ixgbe_vf_reset(dev, vf, msgbuf);

                vfinfo[vf].clear_to_send = true;

                /* notify application about VF reset */
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
                                              &ret_param);
                return ret;
        }

        /**
         * ask user application if we are allowed to perform those functions:
         * if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED,
         * then business as usual;
         * if zero (RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK), do nothing and send
         * ACK to the VF;
         * for any other value, do nothing and send NACK to the VF.
         */
        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);

        retval = ret_param.retval;

        /* check & process VF to PF mailbox message */
        switch ((msgbuf[0] & 0xFFFF)) {
        case IXGBE_VF_SET_MAC_ADDR:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
                break;
        case IXGBE_VF_SET_MULTICAST:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
                break;
        case IXGBE_VF_SET_LPE:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
                break;
        case IXGBE_VF_SET_VLAN:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
                break;
        case IXGBE_VF_API_NEGOTIATE:
                retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
                break;
        case IXGBE_VF_GET_QUEUES:
                retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
                msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
                break;
        case IXGBE_VF_UPDATE_XCAST_MODE:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
                break;
        case IXGBE_VF_SET_MACVLAN:
                if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
                        retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
                break;
        default:
                PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
                retval = IXGBE_ERR_MBX;
                break;
        }

        /* respond to the VF according to the message processing result */
        if (retval)
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
        else
                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(hw, msgbuf, msg_size, vf);

        return retval;
}

static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
        uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

        if (!vfinfo[vf].clear_to_send)
                ixgbe_write_mbx(hw, &msg, 1, vf);
}

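/*
 * Poll every VF for a pending function-level reset, mailbox message or
 * ACK, and process whatever is found.
 */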
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
        uint16_t vf;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
                /* check & process vf function-level reset */
                if (!ixgbe_check_for_rst(hw, vf))
                        ixgbe_vf_reset_event(eth_dev, vf);

                /* check & process vf mailbox messages */
                if (!ixgbe_check_for_msg(hw, vf))
                        ixgbe_rcv_msg_from_vf(eth_dev, vf);

                /* check & process acks from vf */
                if (!ixgbe_check_for_ack(hw, vf))
                        ixgbe_rcv_ack_from_vf(eth_dev, vf);
        }
}