linux/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
   1/* Broadcom NetXtreme-C/E network driver.
   2 *
   3 * Copyright (c) 2014-2016 Broadcom Corporation
   4 * Copyright (c) 2016-2018 Broadcom Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/pci.h>
  13#include <linux/netdevice.h>
  14#include <linux/if_vlan.h>
  15#include <linux/interrupt.h>
  16#include <linux/etherdevice.h>
  17#include "bnxt_hsi.h"
  18#include "bnxt.h"
  19#include "bnxt_ulp.h"
  20#include "bnxt_sriov.h"
  21#include "bnxt_vfr.h"
  22#include "bnxt_ethtool.h"
  23
  24#ifdef CONFIG_BNXT_SRIOV
  25static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
  26                                          struct bnxt_vf_info *vf, u16 event_id)
  27{
  28        struct hwrm_fwd_async_event_cmpl_input req = {0};
  29        struct hwrm_async_event_cmpl *async_cmpl;
  30        int rc = 0;
  31
  32        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
  33        if (vf)
  34                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
  35        else
  36                /* broadcast this async event to all VFs */
  37                req.encap_async_event_target_id = cpu_to_le16(0xffff);
  38        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
  39        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
  40        async_cmpl->event_id = cpu_to_le16(event_id);
  41
  42        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  43        if (rc)
  44                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
  45                           rc);
  46        return rc;
  47}
  48
  49static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
  50{
  51        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
   52                netdev_err(bp->dev, "vf ndo called while PF is down\n");
  53                return -EINVAL;
  54        }
  55        if (!bp->pf.active_vfs) {
   56                netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
  57                return -EINVAL;
  58        }
  59        if (vf_id >= bp->pf.active_vfs) {
  60                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
  61                return -EINVAL;
  62        }
  63        return 0;
  64}
  65
  66int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
  67{
  68        struct hwrm_func_cfg_input req = {0};
  69        struct bnxt *bp = netdev_priv(dev);
  70        struct bnxt_vf_info *vf;
  71        bool old_setting = false;
  72        u32 func_flags;
  73        int rc;
  74
  75        if (bp->hwrm_spec_code < 0x10701)
  76                return -ENOTSUPP;
  77
  78        rc = bnxt_vf_ndo_prep(bp, vf_id);
  79        if (rc)
  80                return rc;
  81
  82        vf = &bp->pf.vf[vf_id];
  83        if (vf->flags & BNXT_VF_SPOOFCHK)
  84                old_setting = true;
  85        if (old_setting == setting)
  86                return 0;
  87
  88        if (setting)
  89                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
  90        else
  91                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
   92        /* TODO: if the driver supports VLAN filter on guest VLAN,
   93         * the spoof check should also include VLAN anti-spoofing.
   94         */
  95        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  96        req.fid = cpu_to_le16(vf->fw_fid);
  97        req.flags = cpu_to_le32(func_flags);
  98        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  99        if (!rc) {
 100                if (setting)
 101                        vf->flags |= BNXT_VF_SPOOFCHK;
 102                else
 103                        vf->flags &= ~BNXT_VF_SPOOFCHK;
 104        }
 105        return rc;
 106}
 107
 108static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 109{
 110        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 111        struct hwrm_func_qcfg_input req = {0};
 112        int rc;
 113
 114        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
 115        req.fid = cpu_to_le16(vf->fw_fid);
 116        mutex_lock(&bp->hwrm_cmd_lock);
 117        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 118        if (rc) {
 119                mutex_unlock(&bp->hwrm_cmd_lock);
 120                return rc;
 121        }
 122        vf->func_qcfg_flags = le16_to_cpu(resp->flags);
 123        mutex_unlock(&bp->hwrm_cmd_lock);
 124        return 0;
 125}
 126
 127static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 128{
 129        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 130                return !!(vf->flags & BNXT_VF_TRUST);
 131
 132        bnxt_hwrm_func_qcfg_flags(bp, vf);
 133        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
 134}
 135
 136static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 137{
 138        struct hwrm_func_cfg_input req = {0};
 139
 140        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 141                return 0;
 142
 143        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 144        req.fid = cpu_to_le16(vf->fw_fid);
 145        if (vf->flags & BNXT_VF_TRUST)
 146                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 147        else
 148                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
 149        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 150}
 151
 152int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
 153{
 154        struct bnxt *bp = netdev_priv(dev);
 155        struct bnxt_vf_info *vf;
 156
 157        if (bnxt_vf_ndo_prep(bp, vf_id))
 158                return -EINVAL;
 159
 160        vf = &bp->pf.vf[vf_id];
 161        if (trusted)
 162                vf->flags |= BNXT_VF_TRUST;
 163        else
 164                vf->flags &= ~BNXT_VF_TRUST;
 165
 166        bnxt_hwrm_set_trusted_vf(bp, vf);
 167        return 0;
 168}
 169
 170int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 171                       struct ifla_vf_info *ivi)
 172{
 173        struct bnxt *bp = netdev_priv(dev);
 174        struct bnxt_vf_info *vf;
 175        int rc;
 176
 177        rc = bnxt_vf_ndo_prep(bp, vf_id);
 178        if (rc)
 179                return rc;
 180
 181        ivi->vf = vf_id;
 182        vf = &bp->pf.vf[vf_id];
 183
 184        if (is_valid_ether_addr(vf->mac_addr))
 185                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
 186        else
 187                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 188        ivi->max_tx_rate = vf->max_tx_rate;
 189        ivi->min_tx_rate = vf->min_tx_rate;
 190        ivi->vlan = vf->vlan;
 191        if (vf->flags & BNXT_VF_QOS)
 192                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 193        else
 194                ivi->qos = 0;
 195        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
 196        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
 197        if (!(vf->flags & BNXT_VF_LINK_FORCED))
 198                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 199        else if (vf->flags & BNXT_VF_LINK_UP)
 200                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
 201        else
 202                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
 203
 204        return 0;
 205}
 206
 207int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 208{
 209        struct hwrm_func_cfg_input req = {0};
 210        struct bnxt *bp = netdev_priv(dev);
 211        struct bnxt_vf_info *vf;
 212        int rc;
 213
 214        rc = bnxt_vf_ndo_prep(bp, vf_id);
 215        if (rc)
 216                return rc;
  217        /* Reject broadcast or multicast MAC addresses; a zero MAC
  218         * address means the VF is allowed to use its own MAC address.
  219         */
 220        if (is_multicast_ether_addr(mac)) {
 221                netdev_err(dev, "Invalid VF ethernet address\n");
 222                return -EINVAL;
 223        }
 224        vf = &bp->pf.vf[vf_id];
 225
 226        memcpy(vf->mac_addr, mac, ETH_ALEN);
 227        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 228        req.fid = cpu_to_le16(vf->fw_fid);
 229        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 230        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 231        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 232}
 233
 234int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 235                     __be16 vlan_proto)
 236{
 237        struct hwrm_func_cfg_input req = {0};
 238        struct bnxt *bp = netdev_priv(dev);
 239        struct bnxt_vf_info *vf;
 240        u16 vlan_tag;
 241        int rc;
 242
 243        if (bp->hwrm_spec_code < 0x10201)
 244                return -ENOTSUPP;
 245
 246        if (vlan_proto != htons(ETH_P_8021Q))
 247                return -EPROTONOSUPPORT;
 248
 249        rc = bnxt_vf_ndo_prep(bp, vf_id);
 250        if (rc)
 251                return rc;
 252
  253        /* TODO: proper handling of user priority still needs to be
  254         * implemented; for now, fail the command if a priority is set.
  255         */
 256        if (vlan_id > 4095 || qos)
 257                return -EINVAL;
 258
 259        vf = &bp->pf.vf[vf_id];
 260        vlan_tag = vlan_id;
 261        if (vlan_tag == vf->vlan)
 262                return 0;
 263
 264        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 265        req.fid = cpu_to_le16(vf->fw_fid);
 266        req.dflt_vlan = cpu_to_le16(vlan_tag);
 267        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 268        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 269        if (!rc)
 270                vf->vlan = vlan_tag;
 271        return rc;
 272}
 273
 274int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 275                   int max_tx_rate)
 276{
 277        struct hwrm_func_cfg_input req = {0};
 278        struct bnxt *bp = netdev_priv(dev);
 279        struct bnxt_vf_info *vf;
 280        u32 pf_link_speed;
 281        int rc;
 282
 283        rc = bnxt_vf_ndo_prep(bp, vf_id);
 284        if (rc)
 285                return rc;
 286
 287        vf = &bp->pf.vf[vf_id];
 288        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
 289        if (max_tx_rate > pf_link_speed) {
  290                netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
 291                            max_tx_rate, vf_id);
 292                return -EINVAL;
 293        }
 294
 295        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
 296                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
 297                            min_tx_rate, vf_id);
 298                return -EINVAL;
 299        }
 300        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 301                return 0;
 302        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 303        req.fid = cpu_to_le16(vf->fw_fid);
 304        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 305        req.max_bw = cpu_to_le32(max_tx_rate);
 306        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
 307        req.min_bw = cpu_to_le32(min_tx_rate);
 308        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 309        if (!rc) {
 310                vf->min_tx_rate = min_tx_rate;
 311                vf->max_tx_rate = max_tx_rate;
 312        }
 313        return rc;
 314}
 315
 316int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 317{
 318        struct bnxt *bp = netdev_priv(dev);
 319        struct bnxt_vf_info *vf;
 320        int rc;
 321
 322        rc = bnxt_vf_ndo_prep(bp, vf_id);
 323        if (rc)
 324                return rc;
 325
 326        vf = &bp->pf.vf[vf_id];
 327
 328        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
 329        switch (link) {
 330        case IFLA_VF_LINK_STATE_AUTO:
 331                vf->flags |= BNXT_VF_LINK_UP;
 332                break;
 333        case IFLA_VF_LINK_STATE_DISABLE:
 334                vf->flags |= BNXT_VF_LINK_FORCED;
 335                break;
 336        case IFLA_VF_LINK_STATE_ENABLE:
 337                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
 338                break;
 339        default:
 340                netdev_err(bp->dev, "Invalid link option\n");
 341                rc = -EINVAL;
 342                break;
 343        }
 344        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
 345                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
 346                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 347        return rc;
 348}
 349
 350static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 351{
 352        int i;
 353        struct bnxt_vf_info *vf;
 354
 355        for (i = 0; i < num_vfs; i++) {
 356                vf = &bp->pf.vf[i];
 357                memset(vf, 0, sizeof(*vf));
 358        }
 359        return 0;
 360}
 361
 362static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 363{
 364        int i, rc = 0;
 365        struct bnxt_pf_info *pf = &bp->pf;
 366        struct hwrm_func_vf_resc_free_input req = {0};
 367
 368        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
 369
 370        mutex_lock(&bp->hwrm_cmd_lock);
 371        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
 372                req.vf_id = cpu_to_le16(i);
 373                rc = _hwrm_send_message(bp, &req, sizeof(req),
 374                                        HWRM_CMD_TIMEOUT);
 375                if (rc)
 376                        break;
 377        }
 378        mutex_unlock(&bp->hwrm_cmd_lock);
 379        return rc;
 380}
 381
 382static void bnxt_free_vf_resources(struct bnxt *bp)
 383{
 384        struct pci_dev *pdev = bp->pdev;
 385        int i;
 386
 387        kfree(bp->pf.vf_event_bmap);
 388        bp->pf.vf_event_bmap = NULL;
 389
 390        for (i = 0; i < 4; i++) {
 391                if (bp->pf.hwrm_cmd_req_addr[i]) {
 392                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 393                                          bp->pf.hwrm_cmd_req_addr[i],
 394                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
 395                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
 396                }
 397        }
 398
 399        bp->pf.active_vfs = 0;
 400        kfree(bp->pf.vf);
 401        bp->pf.vf = NULL;
 402}
 403
 404static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
 405{
 406        struct pci_dev *pdev = bp->pdev;
 407        u32 nr_pages, size, i, j, k = 0;
 408
 409        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
 410        if (!bp->pf.vf)
 411                return -ENOMEM;
 412
 413        bnxt_set_vf_attr(bp, num_vfs);
 414
 415        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
 416        nr_pages = size / BNXT_PAGE_SIZE;
 417        if (size & (BNXT_PAGE_SIZE - 1))
 418                nr_pages++;
 419
 420        for (i = 0; i < nr_pages; i++) {
 421                bp->pf.hwrm_cmd_req_addr[i] =
 422                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
 423                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
 424                                           GFP_KERNEL);
 425
 426                if (!bp->pf.hwrm_cmd_req_addr[i])
 427                        return -ENOMEM;
 428
 429                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
 430                        struct bnxt_vf_info *vf = &bp->pf.vf[k];
 431
 432                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
 433                                                j * BNXT_HWRM_REQ_MAX_SIZE;
 434                        vf->hwrm_cmd_req_dma_addr =
 435                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
 436                                BNXT_HWRM_REQ_MAX_SIZE;
 437                        k++;
 438                }
 439        }
 440
  441        /* Max 128 VFs (16 bytes * 8 bits per byte) */
 442        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
 443        if (!bp->pf.vf_event_bmap)
 444                return -ENOMEM;
 445
 446        bp->pf.hwrm_cmd_req_pages = nr_pages;
 447        return 0;
 448}
 449
 450static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 451{
 452        struct hwrm_func_buf_rgtr_input req = {0};
 453
 454        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
 455
 456        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
 457        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
 458        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
 459        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
 460        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
 461        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
 462        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
 463
 464        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 465}
 466
  467/* Caller holds the bp->hwrm_cmd_lock mutex */
 468static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 469{
 470        struct hwrm_func_cfg_input req = {0};
 471        struct bnxt_vf_info *vf;
 472
 473        vf = &bp->pf.vf[vf_id];
 474        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 475        req.fid = cpu_to_le16(vf->fw_fid);
 476
 477        if (is_valid_ether_addr(vf->mac_addr)) {
 478                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 479                memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
 480        }
 481        if (vf->vlan) {
 482                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 483                req.dflt_vlan = cpu_to_le16(vf->vlan);
 484        }
 485        if (vf->max_tx_rate) {
 486                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 487                req.max_bw = cpu_to_le32(vf->max_tx_rate);
 488#ifdef HAVE_IFLA_TX_RATE
 489                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
 490                req.min_bw = cpu_to_le32(vf->min_tx_rate);
 491#endif
 492        }
 493        if (vf->flags & BNXT_VF_TRUST)
 494                req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
 495
 496        _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 497}
 498
 499/* Only called by PF to reserve resources for VFs, returns actual number of
 500 * VFs configured, or < 0 on error.
 501 */
 502static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 503{
 504        struct hwrm_func_vf_resource_cfg_input req = {0};
 505        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 506        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
 507        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
 508        struct bnxt_pf_info *pf = &bp->pf;
 509        int i, rc = 0, min = 1;
 510        u16 vf_msix = 0;
 511        u16 vf_rss;
 512
 513        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 514
 515        if (bp->flags & BNXT_FLAG_CHIP_P5) {
 516                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
 517                vf_ring_grps = 0;
 518        } else {
 519                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
 520        }
 521        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
 522        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
 523        if (bp->flags & BNXT_FLAG_AGG_RINGS)
 524                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
 525        else
 526                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
 527        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
 528        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
 529        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 530        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 531
 532        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
 533        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 534                min = 0;
 535                req.min_rsscos_ctx = cpu_to_le16(min);
 536        }
 537        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
 538            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 539                req.min_cmpl_rings = cpu_to_le16(min);
 540                req.min_tx_rings = cpu_to_le16(min);
 541                req.min_rx_rings = cpu_to_le16(min);
 542                req.min_l2_ctxs = cpu_to_le16(min);
 543                req.min_vnics = cpu_to_le16(min);
 544                req.min_stat_ctx = cpu_to_le16(min);
 545                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
 546                        req.min_hw_ring_grps = cpu_to_le16(min);
 547        } else {
 548                vf_cp_rings /= num_vfs;
 549                vf_tx_rings /= num_vfs;
 550                vf_rx_rings /= num_vfs;
 551                vf_vnics /= num_vfs;
 552                vf_stat_ctx /= num_vfs;
 553                vf_ring_grps /= num_vfs;
 554                vf_rss /= num_vfs;
 555
 556                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
 557                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
 558                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
 559                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 560                req.min_vnics = cpu_to_le16(vf_vnics);
 561                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
 562                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 563                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
 564        }
 565        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
 566        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
 567        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
 568        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
 569        req.max_vnics = cpu_to_le16(vf_vnics);
 570        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
 571        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 572        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
 573        if (bp->flags & BNXT_FLAG_CHIP_P5)
 574                req.max_msix = cpu_to_le16(vf_msix / num_vfs);
 575
 576        mutex_lock(&bp->hwrm_cmd_lock);
 577        for (i = 0; i < num_vfs; i++) {
 578                if (reset)
 579                        __bnxt_set_vf_params(bp, i);
 580
 581                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
 582                rc = _hwrm_send_message(bp, &req, sizeof(req),
 583                                        HWRM_CMD_TIMEOUT);
 584                if (rc)
 585                        break;
 586                pf->active_vfs = i + 1;
 587                pf->vf[i].fw_fid = pf->first_vf_id + i;
 588        }
 589        mutex_unlock(&bp->hwrm_cmd_lock);
 590        if (pf->active_vfs) {
 591                u16 n = pf->active_vfs;
 592
 593                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
 594                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
 595                hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
 596                                             n;
 597                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
 598                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
 599                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
 600                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
 601                if (bp->flags & BNXT_FLAG_CHIP_P5)
 602                        hw_resc->max_irqs -= vf_msix * n;
 603
 604                rc = pf->active_vfs;
 605        }
 606        return rc;
 607}
 608
 609/* Only called by PF to reserve resources for VFs, returns actual number of
 610 * VFs configured, or < 0 on error.
 611 */
 612static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 613{
  614        int rc = 0, mtu, i;
 615        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
 616        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 617        struct hwrm_func_cfg_input req = {0};
 618        struct bnxt_pf_info *pf = &bp->pf;
 619        int total_vf_tx_rings = 0;
 620        u16 vf_ring_grps;
 621
 622        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 623
  624        /* Remaining rings are distributed equally among the VFs for now */
 625        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
 626        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
 627        if (bp->flags & BNXT_FLAG_AGG_RINGS)
 628                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
 629                              num_vfs;
 630        else
 631                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
 632                              num_vfs;
 633        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
 634        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
 635        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 636        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 637
 638        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
 639                                  FUNC_CFG_REQ_ENABLES_MRU |
 640                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
 641                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
 642                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
 643                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
 644                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
 645                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
 646                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
 647                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
 648
 649        mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
 650        req.mru = cpu_to_le16(mtu);
 651        req.mtu = cpu_to_le16(mtu);
 652
 653        req.num_rsscos_ctxs = cpu_to_le16(1);
 654        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
 655        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
 656        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
 657        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 658        req.num_l2_ctxs = cpu_to_le16(4);
 659
 660        req.num_vnics = cpu_to_le16(vf_vnics);
 661        /* FIXME spec currently uses 1 bit for stats ctx */
 662        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
 663
 664        mutex_lock(&bp->hwrm_cmd_lock);
 665        for (i = 0; i < num_vfs; i++) {
 666                int vf_tx_rsvd = vf_tx_rings;
 667
 668                req.fid = cpu_to_le16(pf->first_vf_id + i);
 669                rc = _hwrm_send_message(bp, &req, sizeof(req),
 670                                        HWRM_CMD_TIMEOUT);
 671                if (rc)
 672                        break;
 673                pf->active_vfs = i + 1;
 674                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
 675                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 676                                              &vf_tx_rsvd);
 677                if (rc)
 678                        break;
 679                total_vf_tx_rings += vf_tx_rsvd;
 680        }
 681        mutex_unlock(&bp->hwrm_cmd_lock);
 682        if (pf->active_vfs) {
 683                hw_resc->max_tx_rings -= total_vf_tx_rings;
 684                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
 685                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
 686                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
 687                hw_resc->max_rsscos_ctxs -= num_vfs;
 688                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
 689                hw_resc->max_vnics -= vf_vnics * num_vfs;
 690                rc = pf->active_vfs;
 691        }
 692        return rc;
 693}
 694
 695static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
 696{
 697        if (BNXT_NEW_RM(bp))
 698                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
 699        else
 700                return bnxt_hwrm_func_cfg(bp, num_vfs);
 701}
 702
 703int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
 704{
 705        int rc;
 706
 707        /* Register buffers for VFs */
 708        rc = bnxt_hwrm_func_buf_rgtr(bp);
 709        if (rc)
 710                return rc;
 711
 712        /* Reserve resources for VFs */
 713        rc = bnxt_func_cfg(bp, *num_vfs, reset);
 714        if (rc != *num_vfs) {
 715                if (rc <= 0) {
 716                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
 717                        *num_vfs = 0;
 718                        return rc;
 719                }
 720                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
 721                            rc);
 722                *num_vfs = rc;
 723        }
 724
 725        bnxt_ulp_sriov_cfg(bp, *num_vfs);
 726        return 0;
 727}
 728
 729static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 730{
 731        int rc = 0, vfs_supported;
 732        int min_rx_rings, min_tx_rings, min_rss_ctxs;
 733        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 734        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 735        int avail_cp, avail_stat;
 736
  737        /* Check if we can enable the requested number of VFs. At a minimum
  738         * we require 1 RX and 1 TX ring for each VF. In this minimum config,
  739         * features like TPA will not be available.
  740         */
 741        vfs_supported = *num_vfs;
 742
 743        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
 744        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
 745        avail_cp = min_t(int, avail_cp, avail_stat);
 746
 747        while (vfs_supported) {
 748                min_rx_rings = vfs_supported;
 749                min_tx_rings = vfs_supported;
 750                min_rss_ctxs = vfs_supported;
 751
 752                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
 753                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
 754                            min_rx_rings)
 755                                rx_ok = 1;
 756                } else {
 757                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
 758                            min_rx_rings)
 759                                rx_ok = 1;
 760                }
 761                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
 762                    avail_cp < min_rx_rings)
 763                        rx_ok = 0;
 764
 765                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
 766                    avail_cp >= min_tx_rings)
 767                        tx_ok = 1;
 768
 769                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
 770                    min_rss_ctxs)
 771                        rss_ok = 1;
 772
 773                if (tx_ok && rx_ok && rss_ok)
 774                        break;
 775
 776                vfs_supported--;
 777        }
 778
 779        if (!vfs_supported) {
  780                netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
 781                return -EINVAL;
 782        }
 783
 784        if (vfs_supported != *num_vfs) {
 785                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
 786                            *num_vfs, vfs_supported);
 787                *num_vfs = vfs_supported;
 788        }
 789
 790        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
 791        if (rc)
 792                goto err_out1;
 793
 794        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
 795        if (rc)
 796                goto err_out2;
 797
 798        rc = pci_enable_sriov(bp->pdev, *num_vfs);
 799        if (rc)
 800                goto err_out2;
 801
 802        return 0;
 803
 804err_out2:
  805        /* Free the resources reserved for the VFs */
 806        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 807
 808err_out1:
 809        bnxt_free_vf_resources(bp);
 810
 811        return rc;
 812}
 813
 814void bnxt_sriov_disable(struct bnxt *bp)
 815{
 816        u16 num_vfs = pci_num_vf(bp->pdev);
 817
 818        if (!num_vfs)
 819                return;
 820
 821        /* synchronize VF and VF-rep create and destroy */
 822        mutex_lock(&bp->sriov_lock);
 823        bnxt_vf_reps_destroy(bp);
 824
 825        if (pci_vfs_assigned(bp->pdev)) {
 826                bnxt_hwrm_fwd_async_event_cmpl(
 827                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
 828                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
 829                            num_vfs);
 830        } else {
 831                pci_disable_sriov(bp->pdev);
  832                /* Free the HW resources reserved for the VFs */
 833                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
 834        }
 835        mutex_unlock(&bp->sriov_lock);
 836
 837        bnxt_free_vf_resources(bp);
 838
 839        /* Reclaim all resources for the PF. */
 840        rtnl_lock();
 841        bnxt_restore_pf_fw_resources(bp);
 842        rtnl_unlock();
 843
 844        bnxt_ulp_sriov_cfg(bp, 0);
 845}
 846
 847int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 848{
 849        struct net_device *dev = pci_get_drvdata(pdev);
 850        struct bnxt *bp = netdev_priv(dev);
 851
 852        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
  853                netdev_warn(dev, "Cannot enable SRIOV when the irq mode is not MSIX\n");
 854                return 0;
 855        }
 856
 857        rtnl_lock();
 858        if (!netif_running(dev)) {
  859                netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
 860                rtnl_unlock();
 861                return 0;
 862        }
 863        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
  864                netdev_warn(dev, "Rejecting SRIOV config request while FW reset is in progress\n");
 865                rtnl_unlock();
 866                return 0;
 867        }
 868        bp->sriov_cfg = true;
 869        rtnl_unlock();
 870
 871        if (pci_vfs_assigned(bp->pdev)) {
 872                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
 873                num_vfs = 0;
 874                goto sriov_cfg_exit;
 875        }
 876
  877        /* Check if the number of enabled VFs is the same as requested */
 878        if (num_vfs && num_vfs == bp->pf.active_vfs)
 879                goto sriov_cfg_exit;
 880
  881        /* if VFs already exist, clean them up */
 882        bnxt_sriov_disable(bp);
 883        if (!num_vfs)
 884                goto sriov_cfg_exit;
 885
 886        bnxt_sriov_enable(bp, &num_vfs);
 887
 888sriov_cfg_exit:
 889        bp->sriov_cfg = false;
 890        wake_up(&bp->sriov_cfg_wait);
 891
 892        return num_vfs;
 893}
 894
 895static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 896                              void *encap_resp, __le64 encap_resp_addr,
 897                              __le16 encap_resp_cpr, u32 msg_size)
 898{
 899        int rc = 0;
 900        struct hwrm_fwd_resp_input req = {0};
 901
 902        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
 903                return -EINVAL;
 904
 905        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
 906
 907        /* Set the new target id */
 908        req.target_id = cpu_to_le16(vf->fw_fid);
 909        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 910        req.encap_resp_len = cpu_to_le16(msg_size);
 911        req.encap_resp_addr = encap_resp_addr;
 912        req.encap_resp_cmpl_ring = encap_resp_cpr;
 913        memcpy(req.encap_resp, encap_resp, msg_size);
 914
 915        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 916        if (rc)
 917                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
 918        return rc;
 919}
 920
 921static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 922                                  u32 msg_size)
 923{
 924        int rc = 0;
 925        struct hwrm_reject_fwd_resp_input req = {0};
 926
 927        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
 928                return -EINVAL;
 929
 930        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
 931        /* Set the new target id */
 932        req.target_id = cpu_to_le16(vf->fw_fid);
 933        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 934        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
 935
 936        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 937        if (rc)
 938                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
 939        return rc;
 940}
 941
 942static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 943                                   u32 msg_size)
 944{
 945        int rc = 0;
 946        struct hwrm_exec_fwd_resp_input req = {0};
 947
 948        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
 949                return -EINVAL;
 950
 951        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
 952        /* Set the new target id */
 953        req.target_id = cpu_to_le16(vf->fw_fid);
 954        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 955        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
 956
 957        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 958        if (rc)
 959                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
 960        return rc;
 961}
 962
 963static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 964{
 965        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
 966        struct hwrm_func_vf_cfg_input *req =
 967                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
 968
  969        /* Allow the VF to set a valid MAC address if the VF is trusted,
  970         * or if the PF-assigned MAC address is zero.
  971         */
 972        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
 973                bool trust = bnxt_is_trusted_vf(bp, vf);
 974
 975                if (is_valid_ether_addr(req->dflt_mac_addr) &&
 976                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
 977                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
 978                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
 979                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
 980                }
 981                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 982        }
 983        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
 984}
 985
 986static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 987{
 988        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
 989        struct hwrm_cfa_l2_filter_alloc_input *req =
 990                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
 991        bool mac_ok = false;
 992
 993        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
 994                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 995
  996        /* Allow the VF to set a valid MAC address if the VF is trusted.
  997         * Otherwise, the MAC address must match the one assigned by the PF,
  998         * or, if none is assigned, the VF's own MAC address (valid with
  999         * firmware spec >= 1.2.2).
 1000         */
1001        if (bnxt_is_trusted_vf(bp, vf)) {
1002                mac_ok = true;
1003        } else if (is_valid_ether_addr(vf->mac_addr)) {
1004                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
1005                        mac_ok = true;
1006        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
1007                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
1008                        mac_ok = true;
1009        } else {
 1010                /* There are two cases:
 1011                 * 1. If firmware spec < 0x10202, the VF MAC address is not
 1012                 *    forwarded to the PF and so it doesn't have to match.
 1013                 * 2. Allow the VF to modify its own MAC when the PF has not
 1014                 *    assigned a valid MAC address and firmware spec >= 0x10202.
 1015                 */
1016                mac_ok = true;
1017        }
1018        if (mac_ok)
1019                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
1020        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
1021}
1022
1023static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
1024{
1025        int rc = 0;
1026
1027        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
1028                /* real link */
1029                rc = bnxt_hwrm_exec_fwd_resp(
1030                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
1031        } else {
1032                struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
1033                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
1034
1035                phy_qcfg_req =
1036                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
1037                mutex_lock(&bp->hwrm_cmd_lock);
1038                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
1039                       sizeof(phy_qcfg_resp));
1040                mutex_unlock(&bp->hwrm_cmd_lock);
1041                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
1042                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
1043                phy_qcfg_resp.valid = 1;
1044
1045                if (vf->flags & BNXT_VF_LINK_UP) {
1046                        /* if physical link is down, force link up on VF */
1047                        if (phy_qcfg_resp.link !=
1048                            PORT_PHY_QCFG_RESP_LINK_LINK) {
1049                                phy_qcfg_resp.link =
1050                                        PORT_PHY_QCFG_RESP_LINK_LINK;
1051                                phy_qcfg_resp.link_speed = cpu_to_le16(
1052                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
1053                                phy_qcfg_resp.duplex_cfg =
1054                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
1055                                phy_qcfg_resp.duplex_state =
1056                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
1057                                phy_qcfg_resp.pause =
1058                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
1059                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
1060                        }
1061                } else {
1062                        /* force link down */
1063                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
1064                        phy_qcfg_resp.link_speed = 0;
1065                        phy_qcfg_resp.duplex_state =
1066                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
1067                        phy_qcfg_resp.pause = 0;
1068                }
1069                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
1070                                        phy_qcfg_req->resp_addr,
1071                                        phy_qcfg_req->cmpl_ring,
1072                                        sizeof(phy_qcfg_resp));
1073        }
1074        return rc;
1075}
1076
1077static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
1078{
1079        int rc = 0;
1080        struct input *encap_req = vf->hwrm_cmd_req_addr;
1081        u32 req_type = le16_to_cpu(encap_req->req_type);
1082
1083        switch (req_type) {
1084        case HWRM_FUNC_VF_CFG:
1085                rc = bnxt_vf_configure_mac(bp, vf);
1086                break;
1087        case HWRM_CFA_L2_FILTER_ALLOC:
1088                rc = bnxt_vf_validate_set_mac(bp, vf);
1089                break;
1090        case HWRM_FUNC_CFG:
 1091                /* TODO: Validate whether the VF is allowed to change the MAC
 1092                 * address, MTU, number of rings, etc.
 1093                 */
1094                rc = bnxt_hwrm_exec_fwd_resp(
1095                        bp, vf, sizeof(struct hwrm_func_cfg_input));
1096                break;
1097        case HWRM_PORT_PHY_QCFG:
1098                rc = bnxt_vf_set_link(bp, vf);
1099                break;
1100        default:
1101                break;
1102        }
1103        return rc;
1104}
1105
1106void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1107{
1108        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
1109
 1110        /* Scan through VFs and process commands */
1111        while (1) {
1112                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
1113                if (vf_id >= active_vfs)
1114                        break;
1115
1116                clear_bit(vf_id, bp->pf.vf_event_bmap);
1117                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
1118                i = vf_id + 1;
1119        }
1120}
1121
1122void bnxt_update_vf_mac(struct bnxt *bp)
1123{
1124        struct hwrm_func_qcaps_input req = {0};
1125        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1126
1127        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
1128        req.fid = cpu_to_le16(0xffff);
1129
1130        mutex_lock(&bp->hwrm_cmd_lock);
1131        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
1132                goto update_vf_mac_exit;
1133
1134        /* Store MAC address from the firmware.  There are 2 cases:
1135         * 1. MAC address is valid.  It is assigned from the PF and we
1136         *    need to override the current VF MAC address with it.
1137         * 2. MAC address is zero.  The VF will use a random MAC address by
1138         *    default but the stored zero MAC will allow the VF user to change
 1139         *    the random MAC address using ndo_set_mac_address() if desired.
1140         */
1141        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
1142                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
1143
1144        /* overwrite netdev dev_addr with admin VF MAC */
1145        if (is_valid_ether_addr(bp->vf.mac_addr))
1146                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
1147update_vf_mac_exit:
1148        mutex_unlock(&bp->hwrm_cmd_lock);
1149}
1150
1151int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1152{
1153        struct hwrm_func_vf_cfg_input req = {0};
1154        int rc = 0;
1155
1156        if (!BNXT_VF(bp))
1157                return 0;
1158
1159        if (bp->hwrm_spec_code < 0x10202) {
1160                if (is_valid_ether_addr(bp->vf.mac_addr))
1161                        rc = -EADDRNOTAVAIL;
1162                goto mac_done;
1163        }
1164        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
1165        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
1166        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1167        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1168mac_done:
1169        if (rc && strict) {
1170                rc = -EADDRNOTAVAIL;
1171                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1172                            mac);
1173                return rc;
1174        }
1175        return 0;
1176}
1177#else
1178
1179int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
1180{
1181        if (*num_vfs)
1182                return -EOPNOTSUPP;
1183        return 0;
1184}
1185
1186void bnxt_sriov_disable(struct bnxt *bp)
1187{
1188}
1189
1190void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
1191{
 1192        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
1193}
1194
1195void bnxt_update_vf_mac(struct bnxt *bp)
1196{
1197}
1198
1199int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1200{
1201        return 0;
1202}
1203#endif
1204