linux/drivers/net/ethernet/intel/ixgbevf/vf.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "vf.h"
   5#include "ixgbevf.h"
   6
   7/* On Hyper-V, to reset, we need to read from this offset
   8 * from the PCI config space. This is the mechanism used on
   9 * Hyper-V to support PF/VF communication.
  10 */
  11#define IXGBE_HV_RESET_OFFSET           0x201
  12
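/**
 * ixgbevf_write_msg_read_ack - post a mailbox message and wait for the reply
 * @hw: pointer to hardware structure
 * @msg: message to send to the PF
 * @retmsg: buffer for the PF's reply; callers commonly pass @msg itself
 * @size: message length in 32-bit words
 *
 * A minimal sketch of the request/ACK pattern used throughout this file
 * (mirroring ixgbevf_set_rlpml_vf below):
 *
 *	u32 msgbuf[2] = { IXGBE_VF_SET_LPE, max_size };
 *
 *	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, ARRAY_SIZE(msgbuf));
 *	if (!err && (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
 *		err = IXGBE_ERR_MBX;
 */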
  13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
  14                                             u32 *retmsg, u16 size)
  15{
  16        struct ixgbe_mbx_info *mbx = &hw->mbx;
  17        s32 retval = mbx->ops.write_posted(hw, msg, size);
  18
  19        if (retval)
  20                return retval;
  21
  22        return mbx->ops.read_posted(hw, retmsg, size);
  23}
  24
  25/**
  26 *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  27 *  @hw: pointer to hardware structure
  28 *
  29 *  Starts the hardware by filling the bus info structure and media type, clears
  30 *  all on chip counters, initializes receive address registers, multicast
  31 *  table, VLAN filter table, calls routine to set up link and flow control
  32 *  settings, and leaves transmit and receive units disabled and uninitialized
  33 **/
  34static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
  35{
  36        /* Clear adapter stopped flag */
  37        hw->adapter_stopped = false;
  38
  39        return 0;
  40}
  41
  42/**
  43 *  ixgbevf_init_hw_vf - virtual function hardware initialization
  44 *  @hw: pointer to hardware structure
  45 *
  46 *  Initialize the hardware by resetting the hardware and then starting
  47 *  the hardware
  48 **/
  49static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
  50{
  51        s32 status = hw->mac.ops.start_hw(hw);
  52
  53        hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
  54
  55        return status;
  56}
  57
  58/**
  59 *  ixgbevf_reset_hw_vf - Performs hardware reset
  60 *  @hw: pointer to hardware structure
  61 *
  62 *  Resets the hardware by resetting the transmit and receive units, masks and
  63 *  clears all interrupts.
  64 **/
  65static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
  66{
  67        struct ixgbe_mbx_info *mbx = &hw->mbx;
  68        u32 timeout = IXGBE_VF_INIT_TIMEOUT;
  69        s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
  70        u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
  71        u8 *addr = (u8 *)(&msgbuf[1]);
  72
  73        /* Call adapter stop to disable tx/rx and clear interrupts */
  74        hw->mac.ops.stop_adapter(hw);
  75
  76        /* reset the api version */
  77        hw->api_version = ixgbe_mbox_api_10;
  78
  79        IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
  80        IXGBE_WRITE_FLUSH(hw);
  81
  82        /* we cannot reset while the RSTI / RSTD bits are asserted */
  83        while (!mbx->ops.check_for_rst(hw) && timeout) {
  84                timeout--;
  85                udelay(5);
  86        }
  87
  88        if (!timeout)
  89                return IXGBE_ERR_RESET_FAILED;
  90
  91        /* mailbox timeout can now become active */
  92        mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
  93
  94        msgbuf[0] = IXGBE_VF_RESET;
  95        mbx->ops.write_posted(hw, msgbuf, 1);
  96
  97        mdelay(10);
  98
  99        /* set our "perm_addr" based on info provided by PF
 100         * also set up the mc_filter_type which is piggybacked
 101         * on the mac address in word 3
 102         */
 103        ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
 104        if (ret_val)
 105                return ret_val;
 106
 107        /* New versions of the PF may NACK the reset return message
 108         * to indicate that no MAC address has yet been assigned for
 109         * the VF.
 110         */
 111        if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
 112            msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
 113                return IXGBE_ERR_INVALID_MAC_ADDR;
 114
 115        if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
 116                ether_addr_copy(hw->mac.perm_addr, addr);
 117
 118        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 119
 120        return 0;
 121}
 122
 123/**
 124 * ixgbevf_hv_reset_hw_vf - Hyper-V variant; the VF/PF communication is
 125 * through the PCI config space.
 126 * @hw: pointer to private hardware struct
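 *
 * Reads the 6-byte permanent MAC address that Hyper-V exposes in the VF's
 * PCI config space, starting at IXGBE_HV_RESET_OFFSET.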
 127 */
 128static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
 129{
 130#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
 131        struct ixgbevf_adapter *adapter = hw->back;
 132        int i;
 133
 134        for (i = 0; i < 6; i++)
 135                pci_read_config_byte(adapter->pdev,
 136                                     (i + IXGBE_HV_RESET_OFFSET),
 137                                     &hw->mac.perm_addr[i]);
 138        return 0;
 139#else
 140        pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
 141        return -EOPNOTSUPP;
 142#endif
 143}
 144
 145/**
 146 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 147 *  @hw: pointer to hardware structure
 148 *
 149 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 150 *  disables transmit and receive units. The adapter_stopped flag is used by
 151 *  the shared code and drivers to determine if the adapter is in a stopped
 152 *  state and should not touch the hardware.
 153 **/
 154static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
 155{
 156        u32 number_of_queues;
 157        u32 reg_val;
 158        u16 i;
 159
 160        /* Set the adapter_stopped flag so other driver functions stop touching
 161         * the hardware
 162         */
 163        hw->adapter_stopped = true;
 164
 165        /* Disable the receive unit by stopping each queue */
 166        number_of_queues = hw->mac.max_rx_queues;
 167        for (i = 0; i < number_of_queues; i++) {
 168                reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 169                if (reg_val & IXGBE_RXDCTL_ENABLE) {
 170                        reg_val &= ~IXGBE_RXDCTL_ENABLE;
 171                        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
 172                }
 173        }
 174
 175        IXGBE_WRITE_FLUSH(hw);
 176
 177        /* Clear the interrupt mask to stop interrupts from being generated */
 178        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 179
 180        /* Clear any pending interrupts */
 181        IXGBE_READ_REG(hw, IXGBE_VTEICR);
 182
 183        /* Disable the transmit unit.  Each queue must be disabled. */
 184        number_of_queues = hw->mac.max_tx_queues;
 185        for (i = 0; i < number_of_queues; i++) {
 186                reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 187                if (reg_val & IXGBE_TXDCTL_ENABLE) {
 188                        reg_val &= ~IXGBE_TXDCTL_ENABLE;
 189                        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
 190                }
 191        }
 192
 193        return 0;
 194}
 195
 196/**
 197 *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 198 *  @hw: pointer to hardware structure
 199 *  @mc_addr: the multicast address
 200 *
 201 *  Extracts the 12 bits from a multicast address to determine which
 202 *  bit-vector to set in the multicast table. The hardware uses 12 bits from
 203 *  incoming Rx multicast addresses to determine the bit-vector to check in
 204 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 205 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 206 *  to mc_filter_type.
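 *
 *  For example, with mc_filter_type 0 and multicast address 01:00:5e:00:00:fb,
 *  mc_addr[4] = 0x00 and mc_addr[5] = 0xfb, so the vector is
 *  (0x00 >> 4) | (0xfb << 4) = 0xfb0.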
 207 **/
 208static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
 209{
 210        u32 vector = 0;
 211
 212        switch (hw->mac.mc_filter_type) {
 213        case 0:   /* use bits [47:36] of the address */
 214                vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
 215                break;
 216        case 1:   /* use bits [46:35] of the address */
 217                vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
 218                break;
 219        case 2:   /* use bits [45:34] of the address */
 220                vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
 221                break;
 222        case 3:   /* use bits [43:32] of the address */
 223                vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
 224                break;
 225        default:  /* Invalid mc_filter_type */
 226                break;
 227        }
 228
 229        /* vector can only be 12-bits or boundary will be exceeded */
 230        vector &= 0xFFF;
 231        return vector;
 232}
 233
 234/**
 235 *  ixgbevf_get_mac_addr_vf - Read device MAC address
 236 *  @hw: pointer to the HW structure
 237 *  @mac_addr: pointer to storage for retrieved MAC address
 238 **/
 239static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 240{
 241        ether_addr_copy(mac_addr, hw->mac.perm_addr);
 242
 243        return 0;
 244}
 245
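/**
 * ixgbevf_set_uc_addr_vf - set or clear a secondary unicast (MACVLAN) address
 * @hw: pointer to hardware structure
 * @index: index of the entry in the VF's list; 0 tells the PF to clear the list
 * @addr: MAC address to program, or NULL when clearing the list
 */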
 246static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 247{
 248        u32 msgbuf[3], msgbuf_chk;
 249        u8 *msg_addr = (u8 *)(&msgbuf[1]);
 250        s32 ret_val;
 251
 252        memset(msgbuf, 0, sizeof(msgbuf));
 253        /* If index is one then this is the start of a new list and needs
 254         * indication to the PF so it can do its own list management.
 255         * If it is zero then that tells the PF to just clear all of
 256         * this VF's macvlans and there is no new list.
 257         */
 258        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
 259        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
 260        msgbuf_chk = msgbuf[0];
 261
 262        if (addr)
 263                ether_addr_copy(msg_addr, addr);
 264
 265        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 266                                             ARRAY_SIZE(msgbuf));
 267        if (!ret_val) {
 268                msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 269
 270                if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
 271                        return -ENOMEM;
 272        }
 273
 274        return ret_val;
 275}
 276
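/**
 * ixgbevf_hv_set_uc_addr_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 */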
 277static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
 278{
 279        return -EOPNOTSUPP;
 280}
 281
 282/**
 283 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 284 * @hw: pointer to hardware structure
 285 * @reta: buffer to fill with RETA contents.
 286 * @num_rx_queues: Number of Rx queues configured for this port
 287 *
 288 * The "reta" buffer should be big enough to contain 32 registers.
 289 *
 290 * Returns: 0 on success.
 291 *          if API doesn't support this operation - (-EOPNOTSUPP).
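 *
 * The caller is expected to hold the mailbox lock (hence the _locked suffix).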
 292 */
 293int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
 294{
 295        int err, i, j;
 296        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 297        u32 *hw_reta = &msgbuf[1];
 298        u32 mask = 0;
 299
 300        /* We have to use a mailbox for 82599 and x540 devices only.
 301         * For these devices RETA has 128 entries.
 302         * Also these VFs support up to 4 RSS queues. Therefore PF will compress
 303         * 16 RETA entries in each DWORD giving 2 bits to each entry.
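         * IXGBEVF_82599_RETA_SIZE / 16 = 128 / 16 = 8 DWORDs on the wire.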
 304         */
 305        int dwords = IXGBEVF_82599_RETA_SIZE / 16;
 306
 307        /* We support the RSS querying for 82599 and x540 devices only.
 308         * Thus return an error if API doesn't support RETA querying or querying
 309         * is not supported for this device type.
 310         */
 311        switch (hw->api_version) {
 312        case ixgbe_mbox_api_14:
 313        case ixgbe_mbox_api_13:
 314        case ixgbe_mbox_api_12:
 315                if (hw->mac.type < ixgbe_mac_X550_vf)
 316                        break;
 317                /* fall through */
 318        default:
 319                return -EOPNOTSUPP;
 320        }
 321
 322        msgbuf[0] = IXGBE_VF_GET_RETA;
 323
 324        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 325
 326        if (err)
 327                return err;
 328
 329        err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
 330
 331        if (err)
 332                return err;
 333
 334        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 335
 336        /* If the operation has been refused by a PF return -EPERM */
 337        if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
 338                return -EPERM;
 339
 340        /* If we didn't get an ACK there must have been
 341         * some sort of mailbox error so we should treat it
 342         * as such.
 343         */
 344        if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
 345                return IXGBE_ERR_MBX;
 346
 347        /* ixgbevf doesn't support more than 2 queues at the moment */
 348        if (num_rx_queues > 1)
 349                mask = 0x1;
 350
 351        for (i = 0; i < dwords; i++)
 352                for (j = 0; j < 16; j++)
 353                        reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
 354
 355        return 0;
 356}
 357
 358/**
 359 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 360 * @hw: pointer to the HW structure
 361 * @rss_key: buffer to fill with RSS Hash Key contents.
 362 *
 363 * The "rss_key" buffer should be big enough to contain 10 registers.
 364 *
 365 * Returns: 0 on success.
 366 *          if API doesn't support this operation - (-EOPNOTSUPP).
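 *
 * The caller is expected to hold the mailbox lock (hence the _locked suffix).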
 367 */
 368int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
 369{
 370        int err;
 371        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 372
 373        /* We currently support the RSS Random Key retrieval for 82599 and x540
 374         * devices only.
 375         *
 376         * Thus return an error if API doesn't support RSS Random Key retrieval
 377         * or if the operation is not supported for this device type.
 378         */
 379        switch (hw->api_version) {
 380        case ixgbe_mbox_api_14:
 381        case ixgbe_mbox_api_13:
 382        case ixgbe_mbox_api_12:
 383                if (hw->mac.type < ixgbe_mac_X550_vf)
 384                        break;
 385                /* fall through */
 386        default:
 387                return -EOPNOTSUPP;
 388        }
 389
 390        msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
 391        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
 392
 393        if (err)
 394                return err;
 395
 396        err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
 397
 398        if (err)
 399                return err;
 400
 401        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 402
 403        /* If the operation has been refused by a PF return -EPERM */
 404        if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
 405                return -EPERM;
 406
 407        /* If we didn't get an ACK there must have been
 408         * some sort of mailbox error so we should treat it
 409         * as such.
 410         */
 411        if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
 412                return IXGBE_ERR_MBX;
 413
 414        memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
 415
 416        return 0;
 417}
 418
 419/**
 420 *  ixgbevf_set_rar_vf - set device MAC address
 421 *  @hw: pointer to hardware structure
 422 *  @index: Receive address register to write
 423 *  @addr: Address to put into receive address register
 424 *  @vmdq: Unused in this implementation
 425 **/
 426static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 427                              u32 vmdq)
 428{
 429        u32 msgbuf[3];
 430        u8 *msg_addr = (u8 *)(&msgbuf[1]);
 431        s32 ret_val;
 432
 433        memset(msgbuf, 0, sizeof(msgbuf));
 434        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
 435        ether_addr_copy(msg_addr, addr);
 436
 437        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 438                                             ARRAY_SIZE(msgbuf));
 439        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 440
 441        /* if nacked the address was rejected, use "perm_addr" */
 442        if (!ret_val &&
 443            (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
 444                ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
 445                return IXGBE_ERR_MBX;
 446        }
 447
 448        return ret_val;
 449}
 450
 451/**
 452 *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 453 *  @hw: pointer to hardware structure
 454 *  @index: Receive address register to write
 455 *  @addr: Address to put into receive address register
 456 *  @vmdq: Unused in this implementation
 457 *
 458 * We don't really allow setting the device MAC address. However,
 459 * if the address being set is the permanent MAC address we will
 460 * permit that.
 461 **/
 462static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 463                                 u32 vmdq)
 464{
 465        if (ether_addr_equal(addr, hw->mac.perm_addr))
 466                return 0;
 467
 468        return -EOPNOTSUPP;
 469}
 470
 471/**
 472 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 473 *  @hw: pointer to the HW structure
 474 *  @netdev: pointer to net device structure
 475 *
 476 *  Updates the Multicast Table Array.
 477 **/
 478static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 479                                          struct net_device *netdev)
 480{
 481        struct netdev_hw_addr *ha;
 482        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 483        u16 *vector_list = (u16 *)&msgbuf[1];
 484        u32 cnt, i;
 485
 486        /* Each entry in the list uses one 16-bit word.  We have 30
 487         * 16-bit words available in our HW msg buffer (minus 1 for the
 488         * msg type).  That's 30 hash values if we pack 'em right.  If
 489         * there are more than 30 MC addresses to add then punt the
 490         * extras for now and then add code to handle more than 30 later.
 491         * It would be unusual for a server to request that many multicast
 492         * addresses except in large enterprise network environments.
 493         */
 494
 495        cnt = netdev_mc_count(netdev);
 496        if (cnt > 30)
 497                cnt = 30;
 498        msgbuf[0] = IXGBE_VF_SET_MULTICAST;
 499        msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
 500
 501        i = 0;
 502        netdev_for_each_mc_addr(ha, netdev) {
 503                if (i == cnt)
 504                        break;
 505                if (is_link_local_ether_addr(ha->addr))
 506                        continue;
 507
 508                vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
 509        }
 510
 511        ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
 512
 513        return 0;
 514}
 515
 516/**
 517 * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant - just a stub.
 518 * @hw: unused
 519 * @netdev: unused
 520 */
 521static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 522                                             struct net_device *netdev)
 523{
 524        return -EOPNOTSUPP;
 525}
 526
 527/**
 528 *  ixgbevf_update_xcast_mode - Update Multicast mode
 529 *  @hw: pointer to the HW structure
 530 *  @xcast_mode: new multicast mode
 531 *
 532 *  Updates the Multicast Mode of VF.
 533 **/
 534static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 535{
 536        u32 msgbuf[2];
 537        s32 err;
 538
 539        switch (hw->api_version) {
 540        case ixgbe_mbox_api_12:
 541                /* promisc introduced in API version 1.3 */
 542                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
 543                        return -EOPNOTSUPP;
 544                /* fall through */
 545        case ixgbe_mbox_api_14:
 546        case ixgbe_mbox_api_13:
 547                break;
 548        default:
 549                return -EOPNOTSUPP;
 550        }
 551
 552        msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
 553        msgbuf[1] = xcast_mode;
 554
 555        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 556                                         ARRAY_SIZE(msgbuf));
 557        if (err)
 558                return err;
 559
 560        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 561        if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
 562                return -EPERM;
 563
 564        return 0;
 565}
 566
 567/**
 568 * ixgbevf_hv_update_xcast_mode - Hyper-V variant - just a stub.
 569 * @hw: unused
 570 * @xcast_mode: unused
 571 */
 572static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 573{
 574        return -EOPNOTSUPP;
 575}
 576
 577/**
 578 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 579 *  @hw: pointer to the HW structure
 580 *  @vlan: 12 bit VLAN ID
 581 *  @vind: unused by VF drivers
 582 *  @vlan_on: if true then set bit, else clear bit
 583 **/
 584static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 585                               bool vlan_on)
 586{
 587        u32 msgbuf[2];
 588        s32 err;
 589
 590        msgbuf[0] = IXGBE_VF_SET_VLAN;
 591        msgbuf[1] = vlan;
 592        /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
 593        msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 594
 595        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 596                                         ARRAY_SIZE(msgbuf));
 597        if (err)
 598                goto mbx_err;
 599
 600        /* remove extra bits from the message */
 601        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 602        msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
 603
 604        if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
 605                err = IXGBE_ERR_INVALID_ARGUMENT;
 606
 607mbx_err:
 608        return err;
 609}
 610
 611/**
 612 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
 613 * @hw: unused
 614 * @vlan: unused
 615 * @vind: unused
 616 * @vlan_on: unused
 617 */
 618static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 619                                  bool vlan_on)
 620{
 621        return -EOPNOTSUPP;
 622}
 623
 624/**
 625 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 626 *  @hw: pointer to hardware structure
 627 *  @speed: Unused in this implementation
 628 *  @autoneg: Unused in this implementation
 629 *  @autoneg_wait_to_complete: Unused in this implementation
 630 *
 631 *  Do nothing and return success.  VF drivers are not allowed to change
 632 *  global settings.  Maintained for driver compatibility.
 633 **/
 634static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
 635                                     ixgbe_link_speed speed, bool autoneg,
 636                                     bool autoneg_wait_to_complete)
 637{
 638        return 0;
 639}
 640
 641/**
 642 *  ixgbevf_check_mac_link_vf - Get link/speed status
 643 *  @hw: pointer to hardware structure
 644 *  @speed: pointer to link speed
 645 *  @link_up: true if link is up, false otherwise
 646 *  @autoneg_wait_to_complete: unused
 647 *
 648 *  Reads the links register to determine if link is up and the current speed
 649 **/
 650static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 651                                     ixgbe_link_speed *speed,
 652                                     bool *link_up,
 653                                     bool autoneg_wait_to_complete)
 654{
 655        struct ixgbe_mbx_info *mbx = &hw->mbx;
 656        struct ixgbe_mac_info *mac = &hw->mac;
 657        s32 ret_val = 0;
 658        u32 links_reg;
 659        u32 in_msg = 0;
 660
 661        /* If we were hit with a reset drop the link */
 662        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 663                mac->get_link_status = true;
 664
 665        if (!mac->get_link_status)
 666                goto out;
 667
 668        /* if link status is down no point in checking to see if pf is up */
 669        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 670        if (!(links_reg & IXGBE_LINKS_UP))
 671                goto out;
 672
 673        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 674         * before the link status is correct
 675         */
 676        if (mac->type == ixgbe_mac_82599_vf) {
 677                int i;
 678
 679                for (i = 0; i < 5; i++) {
 680                        udelay(100);
 681                        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 682
 683                        if (!(links_reg & IXGBE_LINKS_UP))
 684                                goto out;
 685                }
 686        }
 687
 688        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 689        case IXGBE_LINKS_SPEED_10G_82599:
 690                *speed = IXGBE_LINK_SPEED_10GB_FULL;
 691                break;
 692        case IXGBE_LINKS_SPEED_1G_82599:
 693                *speed = IXGBE_LINK_SPEED_1GB_FULL;
 694                break;
 695        case IXGBE_LINKS_SPEED_100_82599:
 696                *speed = IXGBE_LINK_SPEED_100_FULL;
 697                break;
 698        }
 699
 700        /* if the read failed it could just be a mailbox collision, best wait
 701         * until we are called again and don't report an error
 702         */
 703        if (mbx->ops.read(hw, &in_msg, 1))
 704                goto out;
 705
 706        if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
 707                /* msg is not CTS and is NACK; we must have lost CTS status */
 708                if (in_msg & IXGBE_VT_MSGTYPE_NACK)
 709                        ret_val = -1;
 710                goto out;
 711        }
 712
 713        /* the PF is talking; if we timed out in the past we reinit */
 714        if (!mbx->timeout) {
 715                ret_val = -1;
 716                goto out;
 717        }
 718
 719        /* if we passed all the tests above then the link is up and we no
 720         * longer need to check for link
 721         */
 722        mac->get_link_status = false;
 723
 724out:
 725        *link_up = !mac->get_link_status;
 726        return ret_val;
 727}
 728
 729/**
 730 * ixgbevf_hv_check_mac_link_vf - Hyper-V variant; no mailbox communication.
 731 * @hw: pointer to private hardware struct
 732 * @speed: pointer to link speed
 733 * @link_up: true if link is up, false otherwise
 734 * @autoneg_wait_to_complete: unused
 735 */
 736static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 737                                        ixgbe_link_speed *speed,
 738                                        bool *link_up,
 739                                        bool autoneg_wait_to_complete)
 740{
 741        struct ixgbe_mbx_info *mbx = &hw->mbx;
 742        struct ixgbe_mac_info *mac = &hw->mac;
 743        u32 links_reg;
 744
 745        /* If we were hit with a reset drop the link */
 746        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
 747                mac->get_link_status = true;
 748
 749        if (!mac->get_link_status)
 750                goto out;
 751
 752        /* if link status is down no point in checking to see if pf is up */
 753        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 754        if (!(links_reg & IXGBE_LINKS_UP))
 755                goto out;
 756
 757        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 758         * before the link status is correct
 759         */
 760        if (mac->type == ixgbe_mac_82599_vf) {
 761                int i;
 762
 763                for (i = 0; i < 5; i++) {
 764                        udelay(100);
 765                        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
 766
 767                        if (!(links_reg & IXGBE_LINKS_UP))
 768                                goto out;
 769                }
 770        }
 771
 772        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 773        case IXGBE_LINKS_SPEED_10G_82599:
 774                *speed = IXGBE_LINK_SPEED_10GB_FULL;
 775                break;
 776        case IXGBE_LINKS_SPEED_1G_82599:
 777                *speed = IXGBE_LINK_SPEED_1GB_FULL;
 778                break;
 779        case IXGBE_LINKS_SPEED_100_82599:
 780                *speed = IXGBE_LINK_SPEED_100_FULL;
 781                break;
 782        }
 783
 784        /* if we passed all the tests above then the link is up and we no
 785         * longer need to check for link
 786         */
 787        mac->get_link_status = false;
 788
 789out:
 790        *link_up = !mac->get_link_status;
 791        return 0;
 792}
 793
 794/**
 795 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 796 *  @hw: pointer to the HW structure
 797 *  @max_size: value to assign to max frame size
 798 **/
 799static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 800{
 801        u32 msgbuf[2];
 802        s32 ret_val;
 803
 804        msgbuf[0] = IXGBE_VF_SET_LPE;
 805        msgbuf[1] = max_size;
 806
 807        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
 808                                             ARRAY_SIZE(msgbuf));
 809        if (ret_val)
 810                return ret_val;
 811        if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
 812            (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
 813                return IXGBE_ERR_MBX;
 814
 815        return 0;
 816}
 817
 818/**
 819 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 820 * @hw: pointer to the HW structure
 821 * @max_size: value to assign to max frame size
 822 * Hyper-V variant.
 823 **/
 824static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 825{
 826        u32 reg;
 827
 828        /* If we are on Hyper-V, we implement this functionality
 829         * differently.
 830         */
 831        reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
 832        /* account for the 4-byte CRC in the max frame size */
 833        reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
 834        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
 835
 836        return 0;
 837}
 838
 839/**
 840 *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 841 *  @hw: pointer to the HW structure
 842 *  @api: integer containing requested API version
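 *
 *  The caller typically walks the supported versions from newest to oldest
 *  and keeps the first one the PF acknowledges.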
 843 **/
 844static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 845{
 846        int err;
 847        u32 msg[3];
 848
 849        /* Negotiate the mailbox API version */
 850        msg[0] = IXGBE_VF_API_NEGOTIATE;
 851        msg[1] = api;
 852        msg[2] = 0;
 853
 854        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 855        if (!err) {
 856                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 857
 858                /* Store value and return 0 on success */
 859                if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
 860                        hw->api_version = api;
 861                        return 0;
 862                }
 863
 864                err = IXGBE_ERR_INVALID_ARGUMENT;
 865        }
 866
 867        return err;
 868}
 869
 870/**
 871 *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 872 *  @hw: pointer to the HW structure
 873 *  @api: integer containing requested API version
 874 *  Hyper-V version - only ixgbe_mbox_api_10 supported.
 875 **/
 876static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 877{
 878        /* Hyper-V only supports api version ixgbe_mbox_api_10 */
 879        if (api != ixgbe_mbox_api_10)
 880                return IXGBE_ERR_INVALID_ARGUMENT;
 881
 882        return 0;
 883}
 884
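/**
 * ixgbevf_get_queues - Fetch the VF queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes reported by the PF
 * @default_tc: filled with the default queue reported by the PF
 */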
 885int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 886                       unsigned int *default_tc)
 887{
 888        int err;
 889        u32 msg[5];
 890
 891        /* do nothing if API doesn't support ixgbevf_get_queues */
 892        switch (hw->api_version) {
 893        case ixgbe_mbox_api_11:
 894        case ixgbe_mbox_api_12:
 895        case ixgbe_mbox_api_13:
 896        case ixgbe_mbox_api_14:
 897                break;
 898        default:
 899                return 0;
 900        }
 901
 902        /* Fetch queue configuration from the PF */
 903        msg[0] = IXGBE_VF_GET_QUEUE;
 904        msg[1] = msg[2] = msg[3] = msg[4] = 0;
 905
 906        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
 907        if (!err) {
 908                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
 909
 910                /* if we didn't get an ACK there must have been
 911                 * some sort of mailbox error so we should treat it
 912                 * as such
 913                 */
 914                if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
 915                        return IXGBE_ERR_MBX;
 916
 917                /* record and validate values from message */
 918                hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
 919                if (hw->mac.max_tx_queues == 0 ||
 920                    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
 921                        hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
 922
 923                hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
 924                if (hw->mac.max_rx_queues == 0 ||
 925                    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
 926                        hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
 927
 928                *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
 929                /* in case of unknown state assume we cannot tag frames */
 930                if (*num_tcs > hw->mac.max_rx_queues)
 931                        *num_tcs = 1;
 932
 933                *default_tc = msg[IXGBE_VF_DEF_QUEUE];
 934                /* default to queue 0 on out-of-bounds queue number */
 935                if (*default_tc >= hw->mac.max_tx_queues)
 936                        *default_tc = 0;
 937        }
 938
 939        return err;
 940}
 941
 942static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 943        .init_hw                = ixgbevf_init_hw_vf,
 944        .reset_hw               = ixgbevf_reset_hw_vf,
 945        .start_hw               = ixgbevf_start_hw_vf,
 946        .get_mac_addr           = ixgbevf_get_mac_addr_vf,
 947        .stop_adapter           = ixgbevf_stop_hw_vf,
 948        .setup_link             = ixgbevf_setup_mac_link_vf,
 949        .check_link             = ixgbevf_check_mac_link_vf,
 950        .negotiate_api_version  = ixgbevf_negotiate_api_version_vf,
 951        .set_rar                = ixgbevf_set_rar_vf,
 952        .update_mc_addr_list    = ixgbevf_update_mc_addr_list_vf,
 953        .update_xcast_mode      = ixgbevf_update_xcast_mode,
 954        .set_uc_addr            = ixgbevf_set_uc_addr_vf,
 955        .set_vfta               = ixgbevf_set_vfta_vf,
 956        .set_rlpml              = ixgbevf_set_rlpml_vf,
 957};
 958
 959static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
 960        .init_hw                = ixgbevf_init_hw_vf,
 961        .reset_hw               = ixgbevf_hv_reset_hw_vf,
 962        .start_hw               = ixgbevf_start_hw_vf,
 963        .get_mac_addr           = ixgbevf_get_mac_addr_vf,
 964        .stop_adapter           = ixgbevf_stop_hw_vf,
 965        .setup_link             = ixgbevf_setup_mac_link_vf,
 966        .check_link             = ixgbevf_hv_check_mac_link_vf,
 967        .negotiate_api_version  = ixgbevf_hv_negotiate_api_version_vf,
 968        .set_rar                = ixgbevf_hv_set_rar_vf,
 969        .update_mc_addr_list    = ixgbevf_hv_update_mc_addr_list_vf,
 970        .update_xcast_mode      = ixgbevf_hv_update_xcast_mode,
 971        .set_uc_addr            = ixgbevf_hv_set_uc_addr_vf,
 972        .set_vfta               = ixgbevf_hv_set_vfta_vf,
 973        .set_rlpml              = ixgbevf_hv_set_rlpml_vf,
 974};
 975
 976const struct ixgbevf_info ixgbevf_82599_vf_info = {
 977        .mac = ixgbe_mac_82599_vf,
 978        .mac_ops = &ixgbevf_mac_ops,
 979};
 980
 981const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
 982        .mac = ixgbe_mac_82599_vf,
 983        .mac_ops = &ixgbevf_hv_mac_ops,
 984};
 985
 986const struct ixgbevf_info ixgbevf_X540_vf_info = {
 987        .mac = ixgbe_mac_X540_vf,
 988        .mac_ops = &ixgbevf_mac_ops,
 989};
 990
 991const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
 992        .mac = ixgbe_mac_X540_vf,
 993        .mac_ops = &ixgbevf_hv_mac_ops,
 994};
 995
 996const struct ixgbevf_info ixgbevf_X550_vf_info = {
 997        .mac = ixgbe_mac_X550_vf,
 998        .mac_ops = &ixgbevf_mac_ops,
 999};
1000
1001const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1002        .mac = ixgbe_mac_X550_vf,
1003        .mac_ops = &ixgbevf_hv_mac_ops,
1004};
1005
1006const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1007        .mac = ixgbe_mac_X550EM_x_vf,
1008        .mac_ops = &ixgbevf_mac_ops,
1009};
1010
1011const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1012        .mac = ixgbe_mac_X550EM_x_vf,
1013        .mac_ops = &ixgbevf_hv_mac_ops,
1014};
1015
1016const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1017        .mac = ixgbe_mac_x550em_a_vf,
1018        .mac_ops = &ixgbevf_mac_ops,
1019};
1020