linux/drivers/net/ethernet/qlogic/qed/qed_sriov.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                               u8 opcode,
                               __le16 echo,
                               union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
        u8 legacy = 0;

        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                legacy |= QED_QCID_LEGACY_VF_RX_PROD;

        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                legacy |= QED_QCID_LEGACY_VF_CID;

        return legacy;
}
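
/* Worked example: a VF whose fastpath HSI minor equals
 * ETH_HSI_VER_NO_PKT_LEN_TUNN and which did not advertise
 * VFPF_ACQUIRE_CAP_QUEUE_QIDS gets both QED_QCID_LEGACY_VF_RX_PROD and
 * QED_QCID_LEGACY_VF_CID set, i.e. it is treated as fully legacy when
 * its queue CIDs are later built.
 */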

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 fp_minor;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_ETH_ROCE:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR,
                           fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id,
                           bool b_enabled_only, bool b_non_malicious)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
            b_non_malicious)
                return false;

        return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
        int i;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                        return p_queue->cids[i].p_cid;
        }

        return NULL;
}

enum qed_iov_validate_q_mode {
        QED_IOV_VALIDATE_Q_NA,
        QED_IOV_VALIDATE_Q_ENABLE,
        QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf,
                                        u16 qid,
                                        enum qed_iov_validate_q_mode mode,
                                        bool b_is_tx)
{
        int i;

        if (mode == QED_IOV_VALIDATE_Q_NA)
                return true;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                struct qed_vf_queue_cid *p_qcid;

                p_qcid = &p_vf->vf_queues[qid].cids[i];

                if (!p_qcid->p_cid)
                        continue;

                if (p_qcid->b_is_tx != b_is_tx)
                        continue;

                return mode == QED_IOV_VALIDATE_Q_ENABLE;
        }

        /* In case we haven't found any valid cid, then it's disabled */
        return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 rx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 tx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *p_vf, u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_rxqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                false))
                        return true;

        return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_txqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                true))
                        return true;

        return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
{
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return -EINVAL;

        if (!p_vf->vf_bulletin)
                return -EINVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to vm memory */
        memset(&params, 0, sizeof(params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = p_vf->abs_vf_id;
        return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                  &params);
}
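
/* Illustrative sketch (not part of the driver): a consumer of the
 * bulletin board validates a snapshot by mirroring the producer logic
 * above -- the CRC covers everything past the crc field itself. The
 * VF side performs an equivalent check on the copy it reads.
 */
static bool example_bulletin_crc_ok(struct qed_bulletin_content *p_copy,
                                    u32 size)
{
        int crc_size = sizeof(p_copy->crc);

        return p_copy->crc == crc32(0, (u8 *)p_copy + crc_size,
                                    size - crc_size);
}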

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;

                vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        }
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov)
                return -ENOMEM;

        p_hwfn->pf_iov_info = p_sriov;

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
                                  qed_sriov_eqe_event);

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info)
                return -ENOMEM;

        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * in case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* First VF index based on offset is tricky:
         *  - If ARI is supported [likely], offset - (16 - pf_id) would
         *    provide the number for eng0. 2nd engine VFs would begin
         *    after the first engine's VFs.
         *  - If !ARI, VFs would start on the next device,
         *    so offset - (256 - pf_id) would provide the number.
         * Utilize the fact that (256 - pf_id) is achieved only by the
         * latter to differentiate between the two.
         */

        if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 16;

                cdev->p_iov_info->first_vf_in_pf = first;

                if (QED_PATH_ID(p_hwfn))
                        cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
        } else {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 256;

                cdev->p_iov_info->first_vf_in_pf = first;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}
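
/* Worked example for the ARI branch above (hypothetical values): with
 * abs_pf_id == 2 and a PCI VF offset of 14, offset < (256 - pf_id), so
 * first_vf_in_pf == 14 + 2 - 16 == 0 -- the PF's VFs start at engine
 * VF 0. Without ARI, the offset would be at least (256 - pf_id) and
 * the second branch computes the index relative to 256 instead.
 */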

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
                                     int vfid, bool b_fail_malicious)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;

        return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
        u8 current_max = 0;
        int i;

        /* For AH onward, configuration is per-PF. Find maximum of all
         * the currently enabled child VFs, and set the number to be that.
         */
        if (!QED_IS_BB(p_hwfn->cdev)) {
                qed_for_each_vf(p_hwfn, i) {
                        struct qed_vf_info *p_vf;

                        p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
                        if (!p_vf)
                                continue;

                        current_max = max_t(u8, current_max, p_vf->num_sbs);
                }
        }

        if (num_sbs > current_max)
                return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
                                              abs_vf_id, num_sbs);

        return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        /* It's possible VF was previously considered malicious -
         * clear the indication even if we're only going to disable VF.
         */
        vf->b_malicious = false;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
                                           vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        vf->state = VF_FREE;

        return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}
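
/* Worked example: enabling a VF whose abs_vf_id is 5 writes 0x105 into
 * each of its queue-zone entries -- BIT(8) is the Valid bit and the low
 * byte carries VF[7:0], matching the {Valid, VF[7:0]} format described
 * above.
 */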

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *p_block;
        struct cau_sb_entry sb_entry;
        int qid = 0;
        u32 val = 0;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
        p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        for (qid = 0; qid < num_rx_queues; qid++) {
                p_block = qed_get_igu_free_sb(p_hwfn, false);
                vf->igu_sbs[qid] = p_block->igu_sb_id;
                p_block->status &= ~QED_IGU_STATUS_FREE;
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                qed_wr(p_hwfn, p_ptt,
                       IGU_REG_MAPPING_MEMORY +
                       sizeof(u32) * p_block->igu_sb_id, val);

                /* Configure the IGU SBs that were marked valid in CAU */
                qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
                qed_dmae_host2grc(p_hwfn, p_ptt,
                                  (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  p_block->igu_sb_id * sizeof(u64), 2, 0);
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
                p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }

        vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
                             u16 vfid,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *p_caps)
{
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
                                                       false);
        struct qed_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_iov_vf_init_params *p_params)
{
        struct qed_mcp_link_capabilities link_caps;
        struct qed_mcp_link_params link_params;
        struct qed_mcp_link_state link_state;
        u8 num_of_vf_available_chains = 0;
        struct qed_vf_info *vf = NULL;
        u16 qid, num_irqs;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf: vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
                          p_params->rel_vf_id);
                return -EINVAL;
        }

        /* Perform sanity checking on the requested queue_id */
        for (i = 0; i < p_params->num_queues; i++) {
                u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
                u16 max_vf_qzone = min_vf_qzone +
                    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

                qid = p_params->req_rx_queue[i];
                if (qid < min_vf_qzone || qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
                                  qid,
                                  p_params->rel_vf_id,
                                  min_vf_qzone, max_vf_qzone);
                        return -EINVAL;
                }

                qid = p_params->req_tx_queue[i];
                if (qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
                                  qid, p_params->rel_vf_id, max_vf_qzone);
                        return -EINVAL;
                }

                /* If client *really* wants, Tx qid can be shared with PF */
                if (qid < min_vf_qzone)
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
                                   p_params->rel_vf_id, qid, i);
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, p_params->num_queues, (u16)cids);
        num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

        num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                              p_ptt,
                                                              vf, num_irqs);
        if (!num_of_vf_available_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_available_chains;
        vf->num_txqs = num_of_vf_available_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                struct qed_vf_queue *p_queue = &vf->vf_queues[i];

                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
                           p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }

        /* Update the link configuration in bulletin */
        memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
               sizeof(link_params));
        memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
        memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
               sizeof(link_caps));
        qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
                         &link_params, &link_state, &link_caps);

        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs++;
        }

        return rc;
}
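
/* Illustrative usage sketch (hypothetical values, not driver code):
 * initializing VF 0 with two queue pairs taken from the start of the
 * VF L2 queue-zone range validated above.
 */
static int example_init_vf0(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_iov_vf_init_params params;
        u16 base = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);

        memset(&params, 0, sizeof(params));
        params.rel_vf_id = 0;
        params.num_queues = 2;
        params.req_rx_queue[0] = base;
        params.req_rx_queue[1] = base + 1;
        params.req_tx_queue[0] = base;
        params.req_tx_queue[1] = base + 1;

        return qed_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
}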

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
        struct qed_mcp_link_capabilities caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf: vf is NULL\n");
                return -EINVAL;
        }

        if (vf->bulletin.p_virt)
                memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

        memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

        /* Get the link configuration back in bulletin so
         * that when VFs are re-enabled they get the actual
         * link configuration.
         */
        memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
        memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
        memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
        qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

        /* Forget the VF's acquisition message */
        memset(&vf->acquire, 0, sizeof(vf->acquire));

        /* Disabling interrupts and resetting the permission table were done
         * during vf-close; however, we could get here without going through
         * vf_close.
         */
        /* Disable Interrupts for VF */
        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs--;
        }

        return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}
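
/* Illustrative sketch (hypothetical helper, not driver code): a TLV
 * list is built by chaining qed_add_tlv() calls against a moving
 * offset and closing with CHANNEL_TLV_LIST_END, exactly as the
 * response-preparation helpers below do.
 */
static void example_build_tlv_reply(struct qed_hwfn *p_hwfn,
                                    void *reply_buf, u16 type)
{
        u8 *offset = reply_buf;
        struct pfvf_def_resp_tlv *resp;

        resp = qed_add_tlv(p_hwfn, &offset, type,
                           sizeof(struct pfvf_def_resp_tlv));
        resp->hdr.status = PFVF_STATUS_SUCCESS;

        qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));
}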

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "TLV number %d: type %d, length %d\n",
                           i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
                        return;
                }

                total_length += tlv->length;

                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_vf_info *p_vf,
                                  u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct qed_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

        eng_vf_id = p_vf->abs_vf_id;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = QED_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                           mbx->req_virt->first_tlv.reply_address +
                           sizeof(u64),
                           (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                           &params);

        /* Once PF copies the rc to the VF, the latter can continue
         * and send an additional message. So we have to make sure the
         * channel would be re-set to ready prior to that.
         */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

        qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                           mbx->req_virt->first_tlv.reply_address,
                           sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
                                enum qed_iov_vport_update_flag flag)
{
        switch (flag) {
        case QED_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case QED_IOV_VP_UPDATE_VLAN_STRIP:
                return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
        case QED_IOV_VP_UPDATE_TX_SWITCH:
                return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
        case QED_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case QED_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
        case QED_IOV_VP_UPDATE_SGE_TPA:
                return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
        default:
                return 0;
        }
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
                                            struct qed_vf_info *p_vf,
                                            struct qed_iov_vf_mbx *p_mbx,
                                            u8 status,
                                            u16 tlvs_mask, u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & BIT(i)))
                        continue;

                resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
                                   qed_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & BIT(i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[%d] - vport_update response: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_vf_info *vf_info,
                                 u16 type, u16 length, u8 status)
{
        struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        qed_add_tlv(p_hwfn, &mbx->offset, type, length);
        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                    sizeof(struct channel_list_end_tlv));

        qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return NULL;

        return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
        struct qed_public_vf_info *vf_info;

        vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

        if (!vf_info)
                return;

        /* Clear the VF mac */
        eth_zero_addr(vf_info->mac);

        vf_info->rx_accept_mode = 0;
        vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
                               struct qed_vf_info *p_vf)
{
        u32 i, j;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;

        /* If VF previously requested fewer resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
                struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

                for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
                        if (!p_queue->cids[j].p_cid)
                                continue;

                        qed_eth_queue_cid_release(p_hwfn,
                                                  p_queue->cids[j].p_cid);
                        p_queue->cids[j].p_cid = NULL;
                }
        }

        memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log2 of the VF doorbell BAR size */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

        if (val)
                return val + 11;
        return 0;
}
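
/* Worked example (assuming the register encodes log2 of the BAR size
 * with the +11 bias above): a read-back of 6 yields 6 + 11 = 17, i.e.
 * a 2^17 = 128 KiB doorbell BAR; qed_iov_vf_mbx_acquire_resc_cids()
 * below converts this back to bytes with (1 << bar_size).
 */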
1408
1409static void
1410qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
1411                                 struct qed_ptt *p_ptt,
1412                                 struct qed_vf_info *p_vf,
1413                                 struct vf_pf_resc_request *p_req,
1414                                 struct pf_vf_resc *p_resp)
1415{
1416        u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1417        u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
1418                     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
1419        u32 bar_size;
1420
1421        p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
1422
1423        /* If VF didn't bother asking for QIDs then don't bother limiting
1424         * number of CIDs. The VF doesn't care about the number, and this
1425         * has the likely result of causing an additional acquisition.
1426         */
1427        if (!(p_vf->acquire.vfdev_info.capabilities &
1428              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1429                return;
1430
1431        /* If the doorbell bar was mapped by the VF, limit the VF CIDs to
1432         * an amount that ensures doorbells for all CIDs fall within the bar.
1433         * If it wasn't, make sure the regview window is sufficient.
1434         */
1435        if (p_vf->acquire.vfdev_info.capabilities &
1436            VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1437                bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1438                if (bar_size)
1439                        bar_size = 1 << bar_size;
1440
1441                if (p_hwfn->cdev->num_hwfns > 1)
1442                        bar_size /= 2;
1443        } else {
1444                bar_size = PXP_VF_BAR0_DQ_LENGTH;
1445        }
1446
1447        if (bar_size / db_size < 256)
1448                p_resp->num_cids = min_t(u8, p_resp->num_cids,
1449                                         (u8)(bar_size / db_size));
1450}
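
/* Editor's note: the CID clamp above, restated as a standalone sketch
 * (hypothetical helper, not driver code). With, say, a 32 KiB doorbell
 * bar and a 128-byte doorbell stride only 256 doorbells fit, so a larger
 * CID request gets trimmed to what the bar can actually address.
 */
static u8 example_clamp_cids(u8 requested, u32 bar_size, u8 db_size)
{
        if (bar_size / db_size < 256)
                return min_t(u8, requested, (u8)(bar_size / db_size));

        return requested;
}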
1451
1452static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
1453                                      struct qed_ptt *p_ptt,
1454                                      struct qed_vf_info *p_vf,
1455                                      struct vf_pf_resc_request *p_req,
1456                                      struct pf_vf_resc *p_resp)
1457{
1458        u8 i;
1459
1460        /* Queue related information */
1461        p_resp->num_rxqs = p_vf->num_rxqs;
1462        p_resp->num_txqs = p_vf->num_txqs;
1463        p_resp->num_sbs = p_vf->num_sbs;
1464
1465        for (i = 0; i < p_resp->num_sbs; i++) {
1466                p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1467                p_resp->hw_sbs[i].sb_qid = 0;
1468        }
1469
1470        /* These fields are filled for backward compatibility.
1471         * Unused by modern VFs.
1472         */
1473        for (i = 0; i < p_resp->num_rxqs; i++) {
1474                qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1475                                (u16 *)&p_resp->hw_qid[i]);
1476                p_resp->cid[i] = i;
1477        }
1478
1479        /* Filter related information */
1480        p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
1481                                        p_req->num_mac_filters);
1482        p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
1483                                         p_req->num_vlan_filters);
1484
1485        qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1486
1487        /* This isn't really needed/enforced, but some legacy VFs might depend
1488         * on the correct filling of this field.
1489         */
1490        p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
1491
1492        /* Validate sufficient resources for VF */
1493        if (p_resp->num_rxqs < p_req->num_rxqs ||
1494            p_resp->num_txqs < p_req->num_txqs ||
1495            p_resp->num_sbs < p_req->num_sbs ||
1496            p_resp->num_mac_filters < p_req->num_mac_filters ||
1497            p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1498            p_resp->num_mc_filters < p_req->num_mc_filters ||
1499            p_resp->num_cids < p_req->num_cids) {
1500                DP_VERBOSE(p_hwfn,
1501                           QED_MSG_IOV,
1502                           "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1503                           p_vf->abs_vf_id,
1504                           p_req->num_rxqs,
1505                           p_resp->num_rxqs,
1506                           p_req->num_txqs,
1507                           p_resp->num_txqs,
1508                           p_req->num_sbs,
1509                           p_resp->num_sbs,
1510                           p_req->num_mac_filters,
1511                           p_resp->num_mac_filters,
1512                           p_req->num_vlan_filters,
1513                           p_resp->num_vlan_filters,
1514                           p_req->num_mc_filters,
1515                           p_resp->num_mc_filters,
1516                           p_req->num_cids, p_resp->num_cids);
1517
1518                /* Some legacy OSes are incapable of correctly handling this
1519                 * failure.
1520                 */
1521                if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1522                     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1523                    (p_vf->acquire.vfdev_info.os_type ==
1524                     VFPF_ACQUIRE_OS_WINDOWS))
1525                        return PFVF_STATUS_SUCCESS;
1526
1527                return PFVF_STATUS_NO_RESOURCE;
1528        }
1529
1530        return PFVF_STATUS_SUCCESS;
1531}
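
/* Editor's note: a hedged sketch (not driver code) of the negotiation
 * rule applied per resource type above - grant min(request, available),
 * then fail the ACQUIRE if any grant came out smaller than its request.
 */
static bool example_grant_resc(u8 req, u8 avail, u8 *p_granted)
{
        *p_granted = min_t(u8, req, avail);

        return *p_granted >= req;
}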
1532
1533static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
1534                                         struct pfvf_stats_info *p_stats)
1535{
1536        p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1537                                  offsetof(struct mstorm_vf_zone,
1538                                           non_trigger.eth_queue_stat);
1539        p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1540        p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1541                                  offsetof(struct ustorm_vf_zone,
1542                                           non_trigger.eth_queue_stat);
1543        p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1544        p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1545                                  offsetof(struct pstorm_vf_zone,
1546                                           non_trigger.eth_queue_stat);
1547        p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1548        p_stats->tstats.address = 0;
1549        p_stats->tstats.len = 0;
1550}
1551
1552static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1553                                   struct qed_ptt *p_ptt,
1554                                   struct qed_vf_info *vf)
1555{
1556        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1557        struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1558        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1559        struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1560        u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1561        struct pf_vf_resc *resc = &resp->resc;
1562        int rc;
1563
1564        memset(resp, 0, sizeof(*resp));
1565
1566        /* Write the PF version so that the VF knows which version is
1567         * supported - it might be overridden later. This guarantees that
1568         * the VF can recognize a legacy PF by the lack of versions in reply.
1569         */
1570        pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1571        pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1572
1573        if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
1574                DP_VERBOSE(p_hwfn,
1575                           QED_MSG_IOV,
1576                           "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1577                           vf->abs_vf_id, vf->state);
1578                goto out;
1579        }
1580
1581        /* Validate FW compatibility */
1582        if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1583                if (req->vfdev_info.capabilities &
1584                    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1585                        struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1586
1587                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1588                                   "VF[%d] is pre-fastpath HSI\n",
1589                                   vf->abs_vf_id);
1590                        p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1591                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1592                } else {
1593                        DP_INFO(p_hwfn,
1594                                "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1595                                vf->abs_vf_id,
1596                                req->vfdev_info.eth_fp_hsi_major,
1597                                req->vfdev_info.eth_fp_hsi_minor,
1598                                ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1599
1600                        goto out;
1601                }
1602        }
1603
1604        /* On 100g PFs, prevent old VFs from loading */
1605        if ((p_hwfn->cdev->num_hwfns > 1) &&
1606            !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1607                DP_INFO(p_hwfn,
1608                        "VF[%d] is running an old driver that doesn't support 100g\n",
1609                        vf->abs_vf_id);
1610                goto out;
1611        }
1612
1613        /* Store the acquire message */
1614        memcpy(&vf->acquire, req, sizeof(vf->acquire));
1615
1616        vf->opaque_fid = req->vfdev_info.opaque_fid;
1617
1618        vf->vf_bulletin = req->bulletin_addr;
1619        vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1620                            vf->bulletin.size : req->bulletin_size;
1621
1622        /* fill in pfdev info */
1623        pfdev_info->chip_num = p_hwfn->cdev->chip_num;
1624        pfdev_info->db_size = 0;
1625        pfdev_info->indices_per_sb = PIS_PER_SB_E4;
1626
1627        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1628                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1629        if (p_hwfn->cdev->num_hwfns > 1)
1630                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1631
1632        /* Share our ability to use multiple queue-ids only with VFs
1633         * that request it.
1634         */
1635        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1636                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1637
1638        /* Share the sizes of the bars with VF */
1639        resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
1640
1641        qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1642
1643        memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1644
1645        pfdev_info->fw_major = FW_MAJOR_VERSION;
1646        pfdev_info->fw_minor = FW_MINOR_VERSION;
1647        pfdev_info->fw_rev = FW_REVISION_VERSION;
1648        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1649
1650        /* Incorrect for legacy VFs, but it doesn't matter since legacy
1651         * VFs don't read this field.
1652         */
1653        pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
1654                                         req->vfdev_info.eth_fp_hsi_minor);
1655        pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1656        qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1657
1658        pfdev_info->dev_type = p_hwfn->cdev->type;
1659        pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1660
1661        /* Fill resources available to VF; Make sure there are enough to
1662         * satisfy the VF's request.
1663         */
1664        vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1665                                                  &req->resc_request, resc);
1666        if (vfpf_status != PFVF_STATUS_SUCCESS)
1667                goto out;
1668
1669        /* Start the VF in FW */
1670        rc = qed_sp_vf_start(p_hwfn, vf);
1671        if (rc) {
1672                DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1673                vfpf_status = PFVF_STATUS_FAILURE;
1674                goto out;
1675        }
1676
1677        /* Fill agreed size of bulletin board in response */
1678        resp->bulletin_size = vf->bulletin.size;
1679        qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1680
1681        DP_VERBOSE(p_hwfn,
1682                   QED_MSG_IOV,
1683                   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1684                   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1685                   vf->abs_vf_id,
1686                   resp->pfdev_info.chip_num,
1687                   resp->pfdev_info.db_size,
1688                   resp->pfdev_info.indices_per_sb,
1689                   resp->pfdev_info.capabilities,
1690                   resc->num_rxqs,
1691                   resc->num_txqs,
1692                   resc->num_sbs,
1693                   resc->num_mac_filters,
1694                   resc->num_vlan_filters);
1695        vf->state = VF_ACQUIRED;
1696
1697        /* Prepare Response */
1698out:
1699        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1700                             sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1701}
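
/* Editor's note: an illustrative sketch (not driver code) of the
 * bulletin-size negotiation performed during ACQUIRE above - each side
 * advertises a size and the smaller one wins, so both agree on how much
 * of the bulletin board is valid.
 */
static u16 example_negotiate_bulletin_size(u16 pf_size, u16 vf_size)
{
        return pf_size < vf_size ? pf_size : vf_size;
}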
1702
1703static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
1704                                  struct qed_vf_info *p_vf, bool val)
1705{
1706        struct qed_sp_vport_update_params params;
1707        int rc;
1708
1709        if (val == p_vf->spoof_chk) {
1710                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1711                           "Spoofchk value[%d] is already configured\n", val);
1712                return 0;
1713        }
1714
1715        memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
1716        params.opaque_fid = p_vf->opaque_fid;
1717        params.vport_id = p_vf->vport_id;
1718        params.update_anti_spoofing_en_flg = 1;
1719        params.anti_spoofing_en = val;
1720
1721        rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
1722        if (!rc) {
1723                p_vf->spoof_chk = val;
1724                p_vf->req_spoofchk_val = p_vf->spoof_chk;
1725                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1726                           "Spoofchk val[%d] configured\n", val);
1727        } else {
1728                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1729                           "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1730                           val, p_vf->relative_vf_id);
1731        }
1732
1733        return rc;
1734}
1735
1736static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
1737                                            struct qed_vf_info *p_vf)
1738{
1739        struct qed_filter_ucast filter;
1740        int rc = 0;
1741        int i;
1742
1743        memset(&filter, 0, sizeof(filter));
1744        filter.is_rx_filter = 1;
1745        filter.is_tx_filter = 1;
1746        filter.vport_to_add_to = p_vf->vport_id;
1747        filter.opcode = QED_FILTER_ADD;
1748
1749        /* Reconfigure vlans */
1750        for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1751                if (!p_vf->shadow_config.vlans[i].used)
1752                        continue;
1753
1754                filter.type = QED_FILTER_VLAN;
1755                filter.vlan = p_vf->shadow_config.vlans[i].vid;
1756                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1757                           "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1758                           filter.vlan, p_vf->relative_vf_id);
1759                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1760                                             &filter, QED_SPQ_MODE_CB, NULL);
1761                if (rc) {
1762                        DP_NOTICE(p_hwfn,
1763                                  "Failed to configure VLAN [%04x] to VF [%04x]\n",
1764                                  filter.vlan, p_vf->relative_vf_id);
1765                        break;
1766                }
1767        }
1768
1769        return rc;
1770}
1771
1772static int
1773qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
1774                                   struct qed_vf_info *p_vf, u64 events)
1775{
1776        int rc = 0;
1777
1778        if ((events & BIT(VLAN_ADDR_FORCED)) &&
1779            !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1780                rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1781
1782        return rc;
1783}
1784
1785static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
1786                                          struct qed_vf_info *p_vf, u64 events)
1787{
1788        int rc = 0;
1789        struct qed_filter_ucast filter;
1790
1791        if (!p_vf->vport_instance)
1792                return -EINVAL;
1793
1794        if ((events & BIT(MAC_ADDR_FORCED)) ||
1795            p_vf->p_vf_info.is_trusted_configured) {
1796                /* Since there's no way [currently] of removing the MAC,
1797                 * we can always assume this means we need to force it.
1798                 */
1799                memset(&filter, 0, sizeof(filter));
1800                filter.type = QED_FILTER_MAC;
1801                filter.opcode = QED_FILTER_REPLACE;
1802                filter.is_rx_filter = 1;
1803                filter.is_tx_filter = 1;
1804                filter.vport_to_add_to = p_vf->vport_id;
1805                ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
1806
1807                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1808                                             &filter, QED_SPQ_MODE_CB, NULL);
1809                if (rc) {
1810                        DP_NOTICE(p_hwfn,
1811                                  "PF failed to configure MAC for VF\n");
1812                        return rc;
1813                }
1814                if (p_vf->p_vf_info.is_trusted_configured)
1815                        p_vf->configured_features |=
1816                                BIT(VFPF_BULLETIN_MAC_ADDR);
1817                else
1818                        p_vf->configured_features |=
1819                                BIT(MAC_ADDR_FORCED);
1820        }
1821
1822        if (events & BIT(VLAN_ADDR_FORCED)) {
1823                struct qed_sp_vport_update_params vport_update;
1824                u8 removal;
1825                int i;
1826
1827                memset(&filter, 0, sizeof(filter));
1828                filter.type = QED_FILTER_VLAN;
1829                filter.is_rx_filter = 1;
1830                filter.is_tx_filter = 1;
1831                filter.vport_to_add_to = p_vf->vport_id;
1832                filter.vlan = p_vf->bulletin.p_virt->pvid;
1833                filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
1834                                              QED_FILTER_FLUSH;
1835
1836                /* Send the ramrod */
1837                rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1838                                             &filter, QED_SPQ_MODE_CB, NULL);
1839                if (rc) {
1840                        DP_NOTICE(p_hwfn,
1841                                  "PF failed to configure VLAN for VF\n");
1842                        return rc;
1843                }
1844
1845                /* Update the default-vlan & silent vlan stripping */
1846                memset(&vport_update, 0, sizeof(vport_update));
1847                vport_update.opaque_fid = p_vf->opaque_fid;
1848                vport_update.vport_id = p_vf->vport_id;
1849                vport_update.update_default_vlan_enable_flg = 1;
1850                vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1851                vport_update.update_default_vlan_flg = 1;
1852                vport_update.default_vlan = filter.vlan;
1853
1854                vport_update.update_inner_vlan_removal_flg = 1;
1855                removal = filter.vlan ? 1
1856                                      : p_vf->shadow_config.inner_vlan_removal;
1857                vport_update.inner_vlan_removal_flg = removal;
1858                vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1859                rc = qed_sp_vport_update(p_hwfn,
1860                                         &vport_update,
1861                                         QED_SPQ_MODE_EBLOCK, NULL);
1862                if (rc) {
1863                        DP_NOTICE(p_hwfn,
1864                                  "PF failed to configure VF vport for vlan\n");
1865                        return rc;
1866                }
1867
1868                /* Update all the Rx queues */
1869                for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
1870                        struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
1871                        struct qed_queue_cid *p_cid = NULL;
1872
1873                        /* There can be at most one Rx queue per qzone. Find it */
1874                        p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
1875                        if (!p_cid)
1876                                continue;
1877
1878                        rc = qed_sp_eth_rx_queues_update(p_hwfn,
1879                                                         (void **)&p_cid,
1880                                                         1, 0, 1,
1881                                                         QED_SPQ_MODE_EBLOCK,
1882                                                         NULL);
1883                        if (rc) {
1884                                DP_NOTICE(p_hwfn,
1885                                          "Failed to send Rx update fo queue[0x%04x]\n",
1886                                          p_cid->rel.queue_id);
1887                                return rc;
1888                        }
1889                }
1890
1891                if (filter.vlan)
1892                        p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1893                else
1894                        p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
1895        }
1896
1897        /* If forced features are terminated, we need to configure the shadow
1898         * configuration back again.
1899         */
1900        if (events)
1901                qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1902
1903        return rc;
1904}
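
/* Editor's note: a hypothetical sketch (not driver code) of the
 * configured_features bookkeeping used above - the VLAN_ADDR_FORCED bit
 * is set while a forced pvid is active and cleared once a pvid of 0
 * flushes the filters.
 */
static void example_track_forced_vlan(u64 *p_features, u16 pvid)
{
        if (pvid)
                *p_features |= BIT(VLAN_ADDR_FORCED);
        else
                *p_features &= ~BIT(VLAN_ADDR_FORCED);
}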
1905
1906static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1907                                       struct qed_ptt *p_ptt,
1908                                       struct qed_vf_info *vf)
1909{
1910        struct qed_sp_vport_start_params params = { 0 };
1911        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1912        struct vfpf_vport_start_tlv *start;
1913        u8 status = PFVF_STATUS_SUCCESS;
1914        struct qed_vf_info *vf_info;
1915        u64 *p_bitmap;
1916        int sb_id;
1917        int rc;
1918
1919        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1920        if (!vf_info) {
1921                DP_NOTICE(p_hwfn->cdev,
1922                          "Failed to get VF info, invalid vfid [%d]\n",
1923                          vf->relative_vf_id);
1924                return;
1925        }
1926
1927        vf->state = VF_ENABLED;
1928        start = &mbx->req_virt->start_vport;
1929
1930        qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1931
1932        /* Initialize Status block in CAU */
1933        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1934                if (!start->sb_addr[sb_id]) {
1935                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1936                                   "VF[%d] did not fill the address of SB %d\n",
1937                                   vf->relative_vf_id, sb_id);
1938                        break;
1939                }
1940
1941                qed_int_cau_conf_sb(p_hwfn, p_ptt,
1942                                    start->sb_addr[sb_id],
1943                                    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1944        }
1945
1946        vf->mtu = start->mtu;
1947        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1948
1949        /* Take into consideration the configuration forced by the
1950         * hypervisor; if none is configured, use the values supplied by the
1951         * VF [still fine for old VFs, since they passed '0' as padding].
1952         */
1953        p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1954        if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1955                u8 vf_req = start->only_untagged;
1956
1957                vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1958                *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1959        }
1960
1961        params.tpa_mode = start->tpa_mode;
1962        params.remove_inner_vlan = start->inner_vlan_removal;
1963        params.tx_switching = true;
1964
1965        params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1966        params.drop_ttl0 = false;
1967        params.concrete_fid = vf->concrete_fid;
1968        params.opaque_fid = vf->opaque_fid;
1969        params.vport_id = vf->vport_id;
1970        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1971        params.mtu = vf->mtu;
1972
1973        /* Non trusted VFs should enable control frame filtering */
1974        params.check_mac = !vf->p_vf_info.is_trusted_configured;
1975
1976        rc = qed_sp_eth_vport_start(p_hwfn, &params);
1977        if (rc) {
1978                DP_ERR(p_hwfn,
1979                       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1980                status = PFVF_STATUS_FAILURE;
1981        } else {
1982                vf->vport_instance++;
1983
1984                /* Force configuration if needed on the newly opened vport */
1985                qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1986
1987                __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1988        }
1989        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1990                             sizeof(struct pfvf_def_resp_tlv), status);
1991}
1992
1993static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1994                                      struct qed_ptt *p_ptt,
1995                                      struct qed_vf_info *vf)
1996{
1997        u8 status = PFVF_STATUS_SUCCESS;
1998        int rc;
1999
2000        vf->vport_instance--;
2001        vf->spoof_chk = false;
2002
2003        if (qed_iov_validate_active_rxq(p_hwfn, vf) ||
2004            qed_iov_validate_active_txq(p_hwfn, vf)) {
2005                vf->b_malicious = true;
2006                DP_NOTICE(p_hwfn,
2007                          "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
2008                          vf->abs_vf_id);
2009                status = PFVF_STATUS_MALICIOUS;
2010                goto out;
2011        }
2012
2013        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2014        if (rc) {
2015                DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
2016                       rc);
2017                status = PFVF_STATUS_FAILURE;
2018        }
2019
2020        /* Forget the configuration on the vport */
2021        vf->configured_features = 0;
2022        memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2023
2024out:
2025        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2026                             sizeof(struct pfvf_def_resp_tlv), status);
2027}
2028
2029static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2030                                          struct qed_ptt *p_ptt,
2031                                          struct qed_vf_info *vf,
2032                                          u8 status, bool b_legacy)
2033{
2034        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2035        struct pfvf_start_queue_resp_tlv *p_tlv;
2036        struct vfpf_start_rxq_tlv *req;
2037        u16 length;
2038
2039        mbx->offset = (u8 *)mbx->reply_virt;
2040
2041        /* Taking a bigger struct instead of adding a TLV to list was a
2042         * mistake, but one which we're now stuck with, as some older
2043         * clients assume the size of the previous response.
2044         */
2045        if (!b_legacy)
2046                length = sizeof(*p_tlv);
2047        else
2048                length = sizeof(struct pfvf_def_resp_tlv);
2049
2050        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2051                            length);
2052        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2053                    sizeof(struct channel_list_end_tlv));
2054
2055        /* Update the TLV with the response */
2056        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2057                req = &mbx->req_virt->start_rxq;
2058                p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2059                                offsetof(struct mstorm_vf_zone,
2060                                         non_trigger.eth_rx_queue_producers) +
2061                                sizeof(struct eth_rx_prod_data) * req->rx_qid;
2062        }
2063
2064        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2065}
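
/* Editor's note: an illustrative sketch (not driver code) of the
 * producer-offset arithmetic in the response above. Each Rx queue owns
 * one eth_rx_prod_data slot in the Mstorm zone of BAR0, indexed by the
 * VF-relative queue id.
 */
static u32 example_rx_prod_offset(u16 rx_qid)
{
        return PXP_VF_BAR0_START_MSDM_ZONE_B +
               offsetof(struct mstorm_vf_zone,
                        non_trigger.eth_rx_queue_producers) +
               sizeof(struct eth_rx_prod_data) * rx_qid;
}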
2066
2067static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2068                             struct qed_vf_info *p_vf, bool b_is_tx)
2069{
2070        struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2071        struct vfpf_qid_tlv *p_qid_tlv;
2072
2073        /* Search for the qid if the VF published that it's going to provide it */
2074        if (!(p_vf->acquire.vfdev_info.capabilities &
2075              VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2076                if (b_is_tx)
2077                        return QED_IOV_LEGACY_QID_TX;
2078                else
2079                        return QED_IOV_LEGACY_QID_RX;
2080        }
2081
2082        p_qid_tlv = (struct vfpf_qid_tlv *)
2083                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2084                                             CHANNEL_TLV_QID);
2085        if (!p_qid_tlv) {
2086                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2087                           "VF[%2x]: Failed to provide qid\n",
2088                           p_vf->relative_vf_id);
2089
2090                return QED_IOV_QID_INVALID;
2091        }
2092
2093        if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2094                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2095                           "VF[%02x]: Provided qid out-of-bounds %02x\n",
2096                           p_vf->relative_vf_id, p_qid_tlv->qid);
2097                return QED_IOV_QID_INVALID;
2098        }
2099
2100        return p_qid_tlv->qid;
2101}
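
/* Editor's note: the qid selection above as a hedged standalone sketch
 * (hypothetical helper, not driver code). A VF without
 * VFPF_ACQUIRE_CAP_QUEUE_QIDS gets a fixed legacy slot per direction;
 * a QIDS-capable VF must publish an in-range qid or is rejected.
 */
static u8 example_pick_qid(bool b_qids_cap, bool b_is_tx, u8 published_qid)
{
        if (!b_qids_cap)
                return b_is_tx ? QED_IOV_LEGACY_QID_TX : QED_IOV_LEGACY_QID_RX;

        return published_qid < MAX_QUEUES_PER_QZONE ? published_qid
                                                    : QED_IOV_QID_INVALID;
}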
2102
2103static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2104                                     struct qed_ptt *p_ptt,
2105                                     struct qed_vf_info *vf)
2106{
2107        struct qed_queue_start_common_params params;
2108        struct qed_queue_cid_vf_params vf_params;
2109        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2110        u8 status = PFVF_STATUS_NO_RESOURCE;
2111        u8 qid_usage_idx, vf_legacy = 0;
2112        struct vfpf_start_rxq_tlv *req;
2113        struct qed_vf_queue *p_queue;
2114        struct qed_queue_cid *p_cid;
2115        struct qed_sb_info sb_dummy;
2116        int rc;
2117
2118        req = &mbx->req_virt->start_rxq;
2119
2120        if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2121                                  QED_IOV_VALIDATE_Q_DISABLE) ||
2122            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2123                goto out;
2124
2125        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2126        if (qid_usage_idx == QED_IOV_QID_INVALID)
2127                goto out;
2128
2129        p_queue = &vf->vf_queues[req->rx_qid];
2130        if (p_queue->cids[qid_usage_idx].p_cid)
2131                goto out;
2132
2133        vf_legacy = qed_vf_calculate_legacy(vf);
2134
2135        /* Acquire a new queue-cid */
2136        memset(&params, 0, sizeof(params));
2137        params.queue_id = p_queue->fw_rx_qid;
2138        params.vport_id = vf->vport_id;
2139        params.stats_id = vf->abs_vf_id + 0x10;
2140        /* Since IGU index is passed via sb_info, construct a dummy one */
2141        memset(&sb_dummy, 0, sizeof(sb_dummy));
2142        sb_dummy.igu_sb_id = req->hw_sb;
2143        params.p_sb = &sb_dummy;
2144        params.sb_idx = req->sb_index;
2145
2146        memset(&vf_params, 0, sizeof(vf_params));
2147        vf_params.vfid = vf->relative_vf_id;
2148        vf_params.vf_qid = (u8)req->rx_qid;
2149        vf_params.vf_legacy = vf_legacy;
2150        vf_params.qid_usage_idx = qid_usage_idx;
2151        p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2152                                     &params, true, &vf_params);
2153        if (!p_cid)
2154                goto out;
2155
2156        /* Legacy VFs have their producers in a different location, which
2157         * they calculate on their own and clean prior to this.
2158         */
2159        if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2160                REG_WR(p_hwfn,
2161                       GTT_BAR0_MAP_REG_MSDM_RAM +
2162                       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2163                       0);
2164
2165        rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2166                                      req->bd_max_bytes,
2167                                      req->rxq_addr,
2168                                      req->cqe_pbl_addr, req->cqe_pbl_size);
2169        if (rc) {
2170                status = PFVF_STATUS_FAILURE;
2171                qed_eth_queue_cid_release(p_hwfn, p_cid);
2172        } else {
2173                p_queue->cids[qid_usage_idx].p_cid = p_cid;
2174                p_queue->cids[qid_usage_idx].b_is_tx = false;
2175                status = PFVF_STATUS_SUCCESS;
2176                vf->num_active_rxqs++;
2177        }
2178
2179out:
2180        qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2181                                      !!(vf_legacy &
2182                                         QED_QCID_LEGACY_VF_RX_PROD));
2183}
2184
2185static void
2186qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2187                               struct qed_tunnel_info *p_tun,
2188                               u16 tunn_feature_mask)
2189{
2190        p_resp->tunn_feature_mask = tunn_feature_mask;
2191        p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2192        p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2193        p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2194        p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2195        p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2196        p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2197        p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2198        p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2199        p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2200        p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2201        p_resp->geneve_udp_port = p_tun->geneve_port.port;
2202        p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2203}
2204
2205static void
2206__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2207                              struct qed_tunn_update_type *p_tun,
2208                              enum qed_tunn_mode mask, u8 tun_cls)
2209{
2210        if (p_req->tun_mode_update_mask & BIT(mask)) {
2211                p_tun->b_update_mode = true;
2212
2213                if (p_req->tunn_mode & BIT(mask))
2214                        p_tun->b_mode_enabled = true;
2215        }
2216
2217        p_tun->tun_cls = tun_cls;
2218}
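
/* Editor's note: a hedged sketch (not driver code) of the two-level mask
 * decode above. A bit in tun_mode_update_mask means "the VF wants this
 * mode changed"; the matching bit in tunn_mode carries the requested new
 * state.
 */
static void example_decode_tunn_mode(u16 update_mask, u16 mode, u32 bit,
                                     bool *p_update, bool *p_enable)
{
        *p_update = !!(update_mask & BIT(bit));
        *p_enable = *p_update && !!(mode & BIT(bit));
}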
2219
2220static void
2221qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2222                            struct qed_tunn_update_type *p_tun,
2223                            struct qed_tunn_update_udp_port *p_port,
2224                            enum qed_tunn_mode mask,
2225                            u8 tun_cls, u8 update_port, u16 port)
2226{
2227        if (update_port) {
2228                p_port->b_update_port = true;
2229                p_port->port = port;
2230        }
2231
2232        __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2233}
2234
2235static bool
2236qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2237{
2238        bool b_update_requested = false;
2239
2240        if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2241            p_req->update_geneve_port || p_req->update_vxlan_port)
2242                b_update_requested = true;
2243
2244        return b_update_requested;
2245}
2246
2247static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2248{
2249        if (tun->b_update_mode && !tun->b_mode_enabled) {
2250                tun->b_update_mode = false;
2251                *rc = -EINVAL;
2252        }
2253}
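
/* Editor's note: an illustrative sketch (not driver code) of how the
 * caller below accumulates validation results - a VF may only ask the PF
 * to turn a tunnel mode on, never off, and a single bad request taints
 * the whole update with -EINVAL.
 */
static int example_validate_tunn_updates(struct qed_tunnel_info *p_tun)
{
        int rc = 0;

        qed_pf_validate_tunn_mode(&p_tun->vxlan, &rc);
        qed_pf_validate_tunn_mode(&p_tun->l2_gre, &rc);

        return rc;
}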
2254
2255static int
2256qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2257                                   u16 *tun_features, bool *update,
2258                                   struct qed_tunnel_info *tun_src)
2259{
2260        struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2261        struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2262        u16 bultn_vxlan_port, bultn_geneve_port;
2263        void *cookie = p_hwfn->cdev->ops_cookie;
2264        int i, rc = 0;
2265
2266        *tun_features = p_hwfn->cdev->tunn_feature_mask;
2267        bultn_vxlan_port = tun->vxlan_port.port;
2268        bultn_geneve_port = tun->geneve_port.port;
2269        qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2270        qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2271        qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2272        qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2273        qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2274
2275        if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2276            (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2277             tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2278             tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2279             tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2280             tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2281                tun_src->b_update_rx_cls = false;
2282                tun_src->b_update_tx_cls = false;
2283                rc = -EINVAL;
2284        }
2285
2286        if (tun_src->vxlan_port.b_update_port) {
2287                if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2288                        tun_src->vxlan_port.b_update_port = false;
2289                } else {
2290                        *update = true;
2291                        bultn_vxlan_port = tun_src->vxlan_port.port;
2292                }
2293        }
2294
2295        if (tun_src->geneve_port.b_update_port) {
2296                if (tun_src->geneve_port.port == tun->geneve_port.port) {
2297                        tun_src->geneve_port.b_update_port = false;
2298                } else {
2299                        *update = true;
2300                        bultn_geneve_port = tun_src->geneve_port.port;
2301                }
2302        }
2303
2304        qed_for_each_vf(p_hwfn, i) {
2305                qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2306                                               bultn_geneve_port);
2307        }
2308
2309        qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2310        ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2311
2312        return rc;
2313}
2314
2315static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2316                                             struct qed_ptt *p_ptt,
2317                                             struct qed_vf_info *p_vf)
2318{
2319        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2320        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2321        struct pfvf_update_tunn_param_tlv *p_resp;
2322        struct vfpf_update_tunn_param_tlv *p_req;
2323        u8 status = PFVF_STATUS_SUCCESS;
2324        bool b_update_required = false;
2325        struct qed_tunnel_info tunn;
2326        u16 tunn_feature_mask = 0;
2327        int i, rc = 0;
2328
2329        mbx->offset = (u8 *)mbx->reply_virt;
2330
2331        memset(&tunn, 0, sizeof(tunn));
2332        p_req = &mbx->req_virt->tunn_param_update;
2333
2334        if (!qed_iov_pf_validate_tunn_param(p_req)) {
2335                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2336                           "No tunnel update requested by VF\n");
2337                status = PFVF_STATUS_FAILURE;
2338                goto send_resp;
2339        }
2340
2341        tunn.b_update_rx_cls = p_req->update_tun_cls;
2342        tunn.b_update_tx_cls = p_req->update_tun_cls;
2343
2344        qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2345                                    QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2346                                    p_req->update_vxlan_port,
2347                                    p_req->vxlan_port);
2348        qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2349                                    QED_MODE_L2GENEVE_TUNN,
2350                                    p_req->l2geneve_clss,
2351                                    p_req->update_geneve_port,
2352                                    p_req->geneve_port);
2353        __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2354                                      QED_MODE_IPGENEVE_TUNN,
2355                                      p_req->ipgeneve_clss);
2356        __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2357                                      QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2358        __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2359                                      QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2360
2361        /* If the PF modifies the VF's request, it should still return an
2362         * error for a partial or modified configuration, as opposed to the
2363         * requested one.
2364         */
2365        rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2366                                                &b_update_required, &tunn);
2367
2368        if (rc)
2369                status = PFVF_STATUS_FAILURE;
2370
2371        /* Is the QED client willing to update anything? */
2372        if (b_update_required) {
2373                u16 geneve_port;
2374
2375                rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2376                                               QED_SPQ_MODE_EBLOCK, NULL);
2377                if (rc)
2378                        status = PFVF_STATUS_FAILURE;
2379
2380                geneve_port = p_tun->geneve_port.port;
2381                qed_for_each_vf(p_hwfn, i) {
2382                        qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2383                                                       p_tun->vxlan_port.port,
2384                                                       geneve_port);
2385                }
2386        }
2387
2388send_resp:
2389        p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2390                             CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2391
2392        qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2393        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2394                    sizeof(struct channel_list_end_tlv));
2395
2396        qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2397}
2398
2399static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2400                                          struct qed_ptt *p_ptt,
2401                                          struct qed_vf_info *p_vf,
2402                                          u32 cid, u8 status)
2403{
2404        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2405        struct pfvf_start_queue_resp_tlv *p_tlv;
2406        bool b_legacy = false;
2407        u16 length;
2408
2409        mbx->offset = (u8 *)mbx->reply_virt;
2410
2411        /* Taking a bigger struct instead of adding a TLV to list was a
2412         * mistake, but one which we're now stuck with, as some older
2413         * clients assume the size of the previous response.
2414         */
2415        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2416            ETH_HSI_VER_NO_PKT_LEN_TUNN)
2417                b_legacy = true;
2418
2419        if (!b_legacy)
2420                length = sizeof(*p_tlv);
2421        else
2422                length = sizeof(struct pfvf_def_resp_tlv);
2423
2424        p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2425                            length);
2426        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2427                    sizeof(struct channel_list_end_tlv));
2428
2429        /* Update the TLV with the response */
2430        if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2431                p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2432
2433        qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2434}
2435
2436static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2437                                     struct qed_ptt *p_ptt,
2438                                     struct qed_vf_info *vf)
2439{
2440        struct qed_queue_start_common_params params;
2441        struct qed_queue_cid_vf_params vf_params;
2442        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2443        u8 status = PFVF_STATUS_NO_RESOURCE;
2444        struct vfpf_start_txq_tlv *req;
2445        struct qed_vf_queue *p_queue;
2446        struct qed_queue_cid *p_cid;
2447        struct qed_sb_info sb_dummy;
2448        u8 qid_usage_idx, vf_legacy;
2449        u32 cid = 0;
2450        int rc;
2451        u16 pq;
2452
2453        memset(&params, 0, sizeof(params));
2454        req = &mbx->req_virt->start_txq;
2455
2456        if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2457                                  QED_IOV_VALIDATE_Q_NA) ||
2458            !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2459                goto out;
2460
2461        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2462        if (qid_usage_idx == QED_IOV_QID_INVALID)
2463                goto out;
2464
2465        p_queue = &vf->vf_queues[req->tx_qid];
2466        if (p_queue->cids[qid_usage_idx].p_cid)
2467                goto out;
2468
2469        vf_legacy = qed_vf_calculate_legacy(vf);
2470
2471        /* Acquire a new queue-cid */
2472        params.queue_id = p_queue->fw_tx_qid;
2473        params.vport_id = vf->vport_id;
2474        params.stats_id = vf->abs_vf_id + 0x10;
2475
2476        /* Since IGU index is passed via sb_info, construct a dummy one */
2477        memset(&sb_dummy, 0, sizeof(sb_dummy));
2478        sb_dummy.igu_sb_id = req->hw_sb;
2479        params.p_sb = &sb_dummy;
2480        params.sb_idx = req->sb_index;
2481
2482        memset(&vf_params, 0, sizeof(vf_params));
2483        vf_params.vfid = vf->relative_vf_id;
2484        vf_params.vf_qid = (u8)req->tx_qid;
2485        vf_params.vf_legacy = vf_legacy;
2486        vf_params.qid_usage_idx = qid_usage_idx;
2487
2488        p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2489                                     &params, false, &vf_params);
2490        if (!p_cid)
2491                goto out;
2492
2493        pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2494        rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2495                                      req->pbl_addr, req->pbl_size, pq);
2496        if (rc) {
2497                status = PFVF_STATUS_FAILURE;
2498                qed_eth_queue_cid_release(p_hwfn, p_cid);
2499        } else {
2500                status = PFVF_STATUS_SUCCESS;
2501                p_queue->cids[qid_usage_idx].p_cid = p_cid;
2502                p_queue->cids[qid_usage_idx].b_is_tx = true;
2503                cid = p_cid->cid;
2504        }
2505
2506out:
2507        qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2508}
2509
2510static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2511                                struct qed_vf_info *vf,
2512                                u16 rxq_id,
2513                                u8 qid_usage_idx, bool cqe_completion)
2514{
2515        struct qed_vf_queue *p_queue;
2516        int rc = 0;
2517
2518        if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2519                DP_VERBOSE(p_hwfn,
2520                           QED_MSG_IOV,
2521                           "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2522                           vf->relative_vf_id, rxq_id, qid_usage_idx);
2523                return -EINVAL;
2524        }
2525
2526        p_queue = &vf->vf_queues[rxq_id];
2527
2528        /* We've validated the index and the existence of the active RXQ -
2529         * now we need to make sure that it's using the correct qid.
2530         */
2531        if (!p_queue->cids[qid_usage_idx].p_cid ||
2532            p_queue->cids[qid_usage_idx].b_is_tx) {
2533                struct qed_queue_cid *p_cid;
2534
2535                p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2536                DP_VERBOSE(p_hwfn,
2537                           QED_MSG_IOV,
2538                           "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2539                           vf->relative_vf_id,
2540                           rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2541                return -EINVAL;
2542        }
2543
2544        /* Now that we know we have a valid Rx-queue - close it */
2545        rc = qed_eth_rx_queue_stop(p_hwfn,
2546                                   p_queue->cids[qid_usage_idx].p_cid,
2547                                   false, cqe_completion);
2548        if (rc)
2549                return rc;
2550
2551        p_queue->cids[qid_usage_idx].p_cid = NULL;
2552        vf->num_active_rxqs--;
2553
2554        return 0;
2555}
2556
2557static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2558                                struct qed_vf_info *vf,
2559                                u16 txq_id, u8 qid_usage_idx)
2560{
2561        struct qed_vf_queue *p_queue;
2562        int rc = 0;
2563
2564        if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2565                return -EINVAL;
2566
2567        p_queue = &vf->vf_queues[txq_id];
2568        if (!p_queue->cids[qid_usage_idx].p_cid ||
2569            !p_queue->cids[qid_usage_idx].b_is_tx)
2570                return -EINVAL;
2571
2572        rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2573        if (rc)
2574                return rc;
2575
2576        p_queue->cids[qid_usage_idx].p_cid = NULL;
2577        return 0;
2578}
2579
2580static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2581                                     struct qed_ptt *p_ptt,
2582                                     struct qed_vf_info *vf)
2583{
2584        u16 length = sizeof(struct pfvf_def_resp_tlv);
2585        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2586        u8 status = PFVF_STATUS_FAILURE;
2587        struct vfpf_stop_rxqs_tlv *req;
2588        u8 qid_usage_idx;
2589        int rc;
2590
2591        /* There has never been an official driver that used this interface
2592         * for stopping multiple queues, and it is now considered deprecated.
2593         * Validate this isn't used here.
2594         */
2595        req = &mbx->req_virt->stop_rxqs;
2596        if (req->num_rxqs != 1) {
2597                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2598                           "Odd; VF[%d] tried stopping multiple Rx queues\n",
2599                           vf->relative_vf_id);
2600                status = PFVF_STATUS_NOT_SUPPORTED;
2601                goto out;
2602        }
2603
2604        /* Find which qid-index is associated with the queue */
2605        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2606        if (qid_usage_idx == QED_IOV_QID_INVALID)
2607                goto out;
2608
2609        rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2610                                  qid_usage_idx, req->cqe_completion);
2611        if (!rc)
2612                status = PFVF_STATUS_SUCCESS;
2613out:
2614        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2615                             length, status);
2616}
2617
2618static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2619                                     struct qed_ptt *p_ptt,
2620                                     struct qed_vf_info *vf)
2621{
2622        u16 length = sizeof(struct pfvf_def_resp_tlv);
2623        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2624        u8 status = PFVF_STATUS_FAILURE;
2625        struct vfpf_stop_txqs_tlv *req;
2626        u8 qid_usage_idx;
2627        int rc;
2628
2629        /* There has never been an official driver that used this interface
2630         * for stopping multiple queues, and it is now considered deprecated.
2631         * Validate this isn't used here.
2632         */
2633        req = &mbx->req_virt->stop_txqs;
2634        if (req->num_txqs != 1) {
2635                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2636                           "Odd; VF[%d] tried stopping multiple Tx queues\n",
2637                           vf->relative_vf_id);
2638                status = PFVF_STATUS_NOT_SUPPORTED;
2639                goto out;
2640        }
2641
2642        /* Find which qid-index is associated with the queue */
2643        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2644        if (qid_usage_idx == QED_IOV_QID_INVALID)
2645                goto out;
2646
2647        rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2648        if (!rc)
2649                status = PFVF_STATUS_SUCCESS;
2650
2651out:
2652        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2653                             length, status);
2654}
2655
2656static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2657                                       struct qed_ptt *p_ptt,
2658                                       struct qed_vf_info *vf)
2659{
2660        struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2661        u16 length = sizeof(struct pfvf_def_resp_tlv);
2662        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2663        struct vfpf_update_rxq_tlv *req;
2664        u8 status = PFVF_STATUS_FAILURE;
2665        u8 complete_event_flg;
2666        u8 complete_cqe_flg;
2667        u8 qid_usage_idx;
2668        int rc;
2669        u8 i;
2670
2671        req = &mbx->req_virt->update_rxq;
2672        complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2673        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2674
2675        qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2676        if (qid_usage_idx == QED_IOV_QID_INVALID)
2677                goto out;
2678
2679        /* There shouldn't exist a VF that uses queue-qids yet uses this
2680         * API with multiple Rx queues. Validate this.
2681         */
2682        if ((vf->acquire.vfdev_info.capabilities &
2683             VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2684                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2685                           "VF[%d] supports QIDs but sends multiple queues\n",
2686                           vf->relative_vf_id);
2687                goto out;
2688        }
2689
2690        /* Validate inputs - for the legacy case this is still true since
2691         * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2692         */
2693        for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2694                if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2695                                          QED_IOV_VALIDATE_Q_NA) ||
2696                    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2697                    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2698                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2699                                   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2700                                   vf->relative_vf_id, req->rx_qid,
2701                                   req->num_rxqs);
2702                        goto out;
2703                }
2704        }
2705
2706        /* Prepare the handlers */
2707        for (i = 0; i < req->num_rxqs; i++) {
2708                u16 qid = req->rx_qid + i;
2709
2710                handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2711        }
2712
2713        rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2714                                         req->num_rxqs,
2715                                         complete_cqe_flg,
2716                                         complete_event_flg,
2717                                         QED_SPQ_MODE_EBLOCK, NULL);
2718        if (rc)
2719                goto out;
2720
2721        status = PFVF_STATUS_SUCCESS;
2722out:
2723        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2724                             length, status);
2725}
2726
2727void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2728                               void *p_tlvs_list, u16 req_type)
2729{
2730        struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2731        int len = 0;
2732
2733        do {
2734                if (!p_tlv->length) {
2735                        DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2736                        return NULL;
2737                }
2738
2739                if (p_tlv->type == req_type) {
2740                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2741                                   "Extended tlv type %d, length %d found\n",
2742                                   p_tlv->type, p_tlv->length);
2743                        return p_tlv;
2744                }
2745
2746                len += p_tlv->length;
2747                p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2748
2749                if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2750                        DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2751                        return NULL;
2752                }
2753        } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2754
2755        return NULL;
2756}
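
/* Illustrative sketch (editorial, not part of the upstream driver): the
 * typical calling pattern for qed_iov_search_list_tlvs(). A handler asks
 * for one extended TLV type and casts the generic header back to its
 * concrete structure; a NULL return simply means the VF omitted that TLV
 * from its request.
 */
static inline struct vfpf_vport_update_activate_tlv *
qed_example_find_activate_tlv(struct qed_hwfn *p_hwfn,
                              struct qed_iov_vf_mbx *p_mbx)
{
        return (struct vfpf_vport_update_activate_tlv *)
               qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
                                        CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
}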
2757
2758static void
2759qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2760                            struct qed_sp_vport_update_params *p_data,
2761                            struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2762{
2763        struct vfpf_vport_update_activate_tlv *p_act_tlv;
2764        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2765
2766        p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2767                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2768        if (!p_act_tlv)
2769                return;
2770
2771        p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2772        p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2773        p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2774        p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2775        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2776}
2777
2778static void
2779qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2780                             struct qed_sp_vport_update_params *p_data,
2781                             struct qed_vf_info *p_vf,
2782                             struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2783{
2784        struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2785        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2786
2787        p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2788                     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2789        if (!p_vlan_tlv)
2790                return;
2791
2792        p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2793
2794        /* Ignore the VF request if we're forcing a vlan */
2795        if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2796                p_data->update_inner_vlan_removal_flg = 1;
2797                p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2798        }
2799
2800        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2801}
2802
2803static void
2804qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2805                            struct qed_sp_vport_update_params *p_data,
2806                            struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2807{
2808        struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2809        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2810
2811        p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2812                          qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2813                                                   tlv);
2814        if (!p_tx_switch_tlv)
2815                return;
2816
2817        p_data->update_tx_switching_flg = 1;
2818        p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2819        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2820}
2821
2822static void
2823qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2824                                  struct qed_sp_vport_update_params *p_data,
2825                                  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2826{
2827        struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2828        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2829
2830        p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2831            qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2832        if (!p_mcast_tlv)
2833                return;
2834
2835        p_data->update_approx_mcast_flg = 1;
2836        memcpy(p_data->bins, p_mcast_tlv->bins,
2837               sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2838        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2839}
2840
2841static void
2842qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2843                              struct qed_sp_vport_update_params *p_data,
2844                              struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2845{
2846        struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2847        struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2848        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2849
2850        p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2851            qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2852        if (!p_accept_tlv)
2853                return;
2854
2855        p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2856        p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2857        p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2858        p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2859        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2860}
2861
2862static void
2863qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2864                                  struct qed_sp_vport_update_params *p_data,
2865                                  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2866{
2867        struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2868        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2869
2870        p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2871                            qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2872                                                     tlv);
2873        if (!p_accept_any_vlan)
2874                return;
2875
2876        p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2877        p_data->update_accept_any_vlan_flg =
2878                    p_accept_any_vlan->update_accept_any_vlan_flg;
2879        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2880}
2881
2882static void
2883qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2884                            struct qed_vf_info *vf,
2885                            struct qed_sp_vport_update_params *p_data,
2886                            struct qed_rss_params *p_rss,
2887                            struct qed_iov_vf_mbx *p_mbx,
2888                            u16 *tlvs_mask, u16 *tlvs_accepted)
2889{
2890        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2891        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2892        bool b_reject = false;
2893        u16 table_size;
2894        u16 i, q_idx;
2895
2896        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2897                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2898        if (!p_rss_tlv) {
2899                p_data->rss_params = NULL;
2900                return;
2901        }
2902
2903        memset(p_rss, 0, sizeof(struct qed_rss_params));
2904
2905        p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2906                                      VFPF_UPDATE_RSS_CONFIG_FLAG);
2907        p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2908                                            VFPF_UPDATE_RSS_CAPS_FLAG);
2909        p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2910                                         VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2911        p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2912                                   VFPF_UPDATE_RSS_KEY_FLAG);
2913
2914        p_rss->rss_enable = p_rss_tlv->rss_enable;
2915        p_rss->rss_eng_id = vf->relative_vf_id + 1;
2916        p_rss->rss_caps = p_rss_tlv->rss_caps;
2917        p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2918        memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2919
2920        table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2921                           (1 << p_rss_tlv->rss_table_size_log));
2922
2923        for (i = 0; i < table_size; i++) {
2924                struct qed_queue_cid *p_cid;
2925
2926                q_idx = p_rss_tlv->rss_ind_table[i];
2927                if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2928                                          QED_IOV_VALIDATE_Q_ENABLE)) {
2929                        DP_VERBOSE(p_hwfn,
2930                                   QED_MSG_IOV,
2931                                   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2932                                   vf->relative_vf_id, q_idx);
2933                        b_reject = true;
2934                        goto out;
2935                }
2936
2937                p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2938                p_rss->rss_ind_table[i] = p_cid;
2939        }
2940
2941        p_data->rss_params = p_rss;
2942out:
2943        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2944        if (!b_reject)
2945                *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2946}
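
/* Worked example for the clamp above (editorial note): with
 * rss_table_size_log == 7 the VF requests 1 << 7 == 128 indirection
 * entries, so table_size becomes min(ARRAY_SIZE(rss_ind_table), 128) and
 * the validation loop can overrun neither the VF's request nor the PF's
 * table.
 */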
2947
2948static void
2949qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2950                                struct qed_vf_info *vf,
2951                                struct qed_sp_vport_update_params *p_data,
2952                                struct qed_sge_tpa_params *p_sge_tpa,
2953                                struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2954{
2955        struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2956        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2957
2958        p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2959            qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2960
2961        if (!p_sge_tpa_tlv) {
2962                p_data->sge_tpa_params = NULL;
2963                return;
2964        }
2965
2966        memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2967
2968        p_sge_tpa->update_tpa_en_flg =
2969            !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2970        p_sge_tpa->update_tpa_param_flg =
2971            !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2972                VFPF_UPDATE_TPA_PARAM_FLAG);
2973
2974        p_sge_tpa->tpa_ipv4_en_flg =
2975            !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2976        p_sge_tpa->tpa_ipv6_en_flg =
2977            !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2978        p_sge_tpa->tpa_pkt_split_flg =
2979            !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2980        p_sge_tpa->tpa_hdr_data_split_flg =
2981            !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2982        p_sge_tpa->tpa_gro_consistent_flg =
2983            !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2984
2985        p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2986        p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2987        p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2988        p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2989        p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2990
2991        p_data->sge_tpa_params = p_sge_tpa;
2992
2993        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2994}
2995
2996static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2997                                    u8 vfid,
2998                                    struct qed_sp_vport_update_params *params,
2999                                    u16 *tlvs)
3000{
3001        u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
3002        struct qed_filter_accept_flags *flags = &params->accept_flags;
3003        struct qed_public_vf_info *vf_info;
3004
3005        /* Untrusted VFs can't even be trusted to know that fact.
3006         * Simply indicate everything is configured fine, and trace
3007         * configuration 'behind their back'.
3008         */
3009        if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
3010                return 0;
3011
3012        vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3013
3014        if (flags->update_rx_mode_config) {
3015                vf_info->rx_accept_mode = flags->rx_accept_filter;
3016                if (!vf_info->is_trusted_configured)
3017                        flags->rx_accept_filter &= ~mask;
3018        }
3019
3020        if (flags->update_tx_mode_config) {
3021                vf_info->tx_accept_mode = flags->tx_accept_filter;
3022                if (!vf_info->is_trusted_configured)
3023                        flags->tx_accept_filter &= ~mask;
3024        }
3025
3026        return 0;
3027}
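
/* Minimal sketch (editorial, not upstream code) of the accept-mode
 * stripping above: an untrusted VF may ask for promiscuous modes, but the
 * unmatched unicast/multicast bits are silently cleared while matched
 * filtering is left intact.
 */
static inline u8 qed_example_strip_promisc(u8 requested_filter,
                                           bool is_trusted)
{
        u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;

        return is_trusted ? requested_filter : requested_filter & ~mask;
}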
3028
3029static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3030                                        struct qed_ptt *p_ptt,
3031                                        struct qed_vf_info *vf)
3032{
3033        struct qed_rss_params *p_rss_params = NULL;
3034        struct qed_sp_vport_update_params params;
3035        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3036        struct qed_sge_tpa_params sge_tpa_params;
3037        u16 tlvs_mask = 0, tlvs_accepted = 0;
3038        u8 status = PFVF_STATUS_SUCCESS;
3039        u16 length;
3040        int rc;
3041
3042        /* Validate the VF can send such a request */
3043        if (!vf->vport_instance) {
3044                DP_VERBOSE(p_hwfn,
3045                           QED_MSG_IOV,
3046                           "No VPORT instance available for VF[%d], failing vport update\n",
3047                           vf->abs_vf_id);
3048                status = PFVF_STATUS_FAILURE;
3049                goto out;
3050        }
3051        p_rss_params = vzalloc(sizeof(*p_rss_params));
3052        if (!p_rss_params) {
3053                status = PFVF_STATUS_FAILURE;
3054                goto out;
3055        }
3056
3057        memset(&params, 0, sizeof(params));
3058        params.opaque_fid = vf->opaque_fid;
3059        params.vport_id = vf->vport_id;
3060        params.rss_params = NULL;
3061
3062        /* Search for extended tlvs list and update values
3063         * from VF in struct qed_sp_vport_update_params.
3064         */
3065        qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3066        qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3067        qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3068        qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3069        qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3070        qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3071        qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3072                                        &sge_tpa_params, mbx, &tlvs_mask);
3073
3074        tlvs_accepted = tlvs_mask;
3075
3076        /* Some of the extended TLVs need to be validated first; in that case,
3077         * they update the mask without updating the accepted bits [so that the
3078         * PF can communicate to the VF that it has rejected the request].
3079         */
3080        qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3081                                    mbx, &tlvs_mask, &tlvs_accepted);
3082
3083        if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3084                                     &params, &tlvs_accepted)) {
3085                tlvs_accepted = 0;
3086                status = PFVF_STATUS_NOT_SUPPORTED;
3087                goto out;
3088        }
3089
3090        if (!tlvs_accepted) {
3091                if (tlvs_mask)
3092                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3093                                   "Upper-layer prevents VF vport configuration\n");
3094                else
3095                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3096                                   "No feature tlvs found for vport update\n");
3097                status = PFVF_STATUS_NOT_SUPPORTED;
3098                goto out;
3099        }
3100
3101        rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3102
3103        if (rc)
3104                status = PFVF_STATUS_FAILURE;
3105
3106out:
3107        vfree(p_rss_params);
3108        length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3109                                                  tlvs_mask, tlvs_accepted);
3110        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3111}
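
/* Example of the two masks above (editorial note): if a VF sends both an
 * ACTIVATE and an RSS TLV but its RSS indirection table references an
 * invalid queue, tlvs_mask carries both bits while tlvs_accepted carries
 * only the ACTIVATE bit; the response TLVs then tell the VF exactly which
 * part of its request was rejected.
 */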
3112
3113static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3114                                         struct qed_vf_info *p_vf,
3115                                         struct qed_filter_ucast *p_params)
3116{
3117        int i;
3118
3119        /* First remove entries and then add new ones */
3120        if (p_params->opcode == QED_FILTER_REMOVE) {
3121                for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3122                        if (p_vf->shadow_config.vlans[i].used &&
3123                            p_vf->shadow_config.vlans[i].vid ==
3124                            p_params->vlan) {
3125                                p_vf->shadow_config.vlans[i].used = false;
3126                                break;
3127                        }
3128                if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3129                        DP_VERBOSE(p_hwfn,
3130                                   QED_MSG_IOV,
3131                                   "VF [%d] - Tries to remove a non-existent VLAN\n",
3132                                   p_vf->relative_vf_id);
3133                        return -EINVAL;
3134                }
3135        } else if (p_params->opcode == QED_FILTER_REPLACE ||
3136                   p_params->opcode == QED_FILTER_FLUSH) {
3137                for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3138                        p_vf->shadow_config.vlans[i].used = false;
3139        }
3140
3141        /* In forced mode, we're willing to remove entries - but we don't add
3142         * new ones.
3143         */
3144        if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3145                return 0;
3146
3147        if (p_params->opcode == QED_FILTER_ADD ||
3148            p_params->opcode == QED_FILTER_REPLACE) {
3149                for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3150                        if (p_vf->shadow_config.vlans[i].used)
3151                                continue;
3152
3153                        p_vf->shadow_config.vlans[i].used = true;
3154                        p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3155                        break;
3156                }
3157
3158                if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3159                        DP_VERBOSE(p_hwfn,
3160                                   QED_MSG_IOV,
3161                                   "VF [%d] - Tries to configure more than %d vlan filters\n",
3162                                   p_vf->relative_vf_id,
3163                                   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3164                        return -EINVAL;
3165                }
3166        }
3167
3168        return 0;
3169}
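
/* Illustrative helper (editorial, not upstream): how a lookup against the
 * VLAN shadow configuration maintained above would read. The shadow keeps
 * QED_ETH_VF_NUM_VLAN_FILTERS + 1 slots per VF.
 */
static inline bool qed_example_vlan_shadowed(struct qed_vf_info *p_vf,
                                             u16 vid)
{
        int i;

        for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
                if (p_vf->shadow_config.vlans[i].used &&
                    p_vf->shadow_config.vlans[i].vid == vid)
                        return true;

        return false;
}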
3170
3171static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3172                                        struct qed_vf_info *p_vf,
3173                                        struct qed_filter_ucast *p_params)
3174{
3175        int i;
3176
3177        /* If we're in forced-mode, we don't allow any change */
3178        if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3179                return 0;
3180
3181        /* Don't keep a shadow copy, since we don't intend to restore it. */
3182        if (p_vf->p_vf_info.is_trusted_configured)
3183                return 0;
3184
3185        /* First remove entries and then add new ones */
3186        if (p_params->opcode == QED_FILTER_REMOVE) {
3187                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3188                        if (ether_addr_equal(p_vf->shadow_config.macs[i],
3189                                             p_params->mac)) {
3190                                eth_zero_addr(p_vf->shadow_config.macs[i]);
3191                                break;
3192                        }
3193                }
3194
3195                if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3196                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3197                                   "MAC isn't configured\n");
3198                        return -EINVAL;
3199                }
3200        } else if (p_params->opcode == QED_FILTER_REPLACE ||
3201                   p_params->opcode == QED_FILTER_FLUSH) {
3202                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3203                        eth_zero_addr(p_vf->shadow_config.macs[i]);
3204        }
3205
3206        /* List the new MAC address */
3207        if (p_params->opcode != QED_FILTER_ADD &&
3208            p_params->opcode != QED_FILTER_REPLACE)
3209                return 0;
3210
3211        for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3212                if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3213                        ether_addr_copy(p_vf->shadow_config.macs[i],
3214                                        p_params->mac);
3215                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3216                                   "Added MAC at %d entry in shadow\n", i);
3217                        break;
3218                }
3219        }
3220
3221        if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3222                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3223                return -EINVAL;
3224        }
3225
3226        return 0;
3227}
3228
3229static int
3230qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3231                                 struct qed_vf_info *p_vf,
3232                                 struct qed_filter_ucast *p_params)
3233{
3234        int rc = 0;
3235
3236        if (p_params->type == QED_FILTER_MAC) {
3237                rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3238                if (rc)
3239                        return rc;
3240        }
3241
3242        if (p_params->type == QED_FILTER_VLAN)
3243                rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3244
3245        return rc;
3246}
3247
3248static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3249                             int vfid, struct qed_filter_ucast *params)
3250{
3251        struct qed_public_vf_info *vf;
3252
3253        vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3254        if (!vf)
3255                return -EINVAL;
3256
3257        /* No real decision to make; Store the configured MAC */
3258        if (params->type == QED_FILTER_MAC ||
3259            params->type == QED_FILTER_MAC_VLAN) {
3260                ether_addr_copy(vf->mac, params->mac);
3261
3262                if (vf->is_trusted_configured) {
3263                        qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3264
3265                        /* Update and post the bulletin again */
3266                        qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3267                }
3268        }
3269
3270        return 0;
3271}
3272
3273static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3274                                        struct qed_ptt *p_ptt,
3275                                        struct qed_vf_info *vf)
3276{
3277        struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3278        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3279        struct vfpf_ucast_filter_tlv *req;
3280        u8 status = PFVF_STATUS_SUCCESS;
3281        struct qed_filter_ucast params;
3282        int rc;
3283
3284        /* Prepare the unicast filter params */
3285        memset(&params, 0, sizeof(struct qed_filter_ucast));
3286        req = &mbx->req_virt->ucast_filter;
3287        params.opcode = (enum qed_filter_opcode)req->opcode;
3288        params.type = (enum qed_filter_ucast_type)req->type;
3289
3290        params.is_rx_filter = 1;
3291        params.is_tx_filter = 1;
3292        params.vport_to_remove_from = vf->vport_id;
3293        params.vport_to_add_to = vf->vport_id;
3294        memcpy(params.mac, req->mac, ETH_ALEN);
3295        params.vlan = req->vlan;
3296
3297        DP_VERBOSE(p_hwfn,
3298                   QED_MSG_IOV,
3299                   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3300                   vf->abs_vf_id, params.opcode, params.type,
3301                   params.is_rx_filter ? "RX" : "",
3302                   params.is_tx_filter ? "TX" : "",
3303                   params.vport_to_add_to,
3304                   params.mac[0], params.mac[1],
3305                   params.mac[2], params.mac[3],
3306                   params.mac[4], params.mac[5], params.vlan);
3307
3308        if (!vf->vport_instance) {
3309                DP_VERBOSE(p_hwfn,
3310                           QED_MSG_IOV,
3311                           "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3312                           vf->abs_vf_id);
3313                status = PFVF_STATUS_FAILURE;
3314                goto out;
3315        }
3316
3317        /* Update shadow copy of the VF configuration */
3318        if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3319                status = PFVF_STATUS_FAILURE;
3320                goto out;
3321        }
3322
3323        /* Determine if the unicast filtering is acceptable to the PF */
3324        if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3325            (params.type == QED_FILTER_VLAN ||
3326             params.type == QED_FILTER_MAC_VLAN)) {
3327                /* Once VLAN is forced or PVID is set, do not allow
3328                 * to add/replace any further VLANs.
3329                 */
3330                if (params.opcode == QED_FILTER_ADD ||
3331                    params.opcode == QED_FILTER_REPLACE)
3332                        status = PFVF_STATUS_FORCED;
3333                goto out;
3334        }
3335
3336        if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3337            (params.type == QED_FILTER_MAC ||
3338             params.type == QED_FILTER_MAC_VLAN)) {
3339                if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3340                    (params.opcode != QED_FILTER_ADD &&
3341                     params.opcode != QED_FILTER_REPLACE))
3342                        status = PFVF_STATUS_FORCED;
3343                goto out;
3344        }
3345
3346        rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3347        if (rc) {
3348                status = PFVF_STATUS_FAILURE;
3349                goto out;
3350        }
3351
3352        rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3353                                     QED_SPQ_MODE_CB, NULL);
3354        if (rc)
3355                status = PFVF_STATUS_FAILURE;
3356
3357out:
3358        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3359                             sizeof(struct pfvf_def_resp_tlv), status);
3360}
3361
3362static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3363                                       struct qed_ptt *p_ptt,
3364                                       struct qed_vf_info *vf)
3365{
3366        int i;
3367
3368        /* Reset the SBs */
3369        for (i = 0; i < vf->num_sbs; i++)
3370                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3371                                                vf->igu_sbs[i],
3372                                                vf->opaque_fid, false);
3373
3374        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3375                             sizeof(struct pfvf_def_resp_tlv),
3376                             PFVF_STATUS_SUCCESS);
3377}
3378
3379static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3380                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3381{
3382        u16 length = sizeof(struct pfvf_def_resp_tlv);
3383        u8 status = PFVF_STATUS_SUCCESS;
3384
3385        /* Disable Interrupts for VF */
3386        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3387
3388        /* Reset Permission table */
3389        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3390
3391        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3392                             length, status);
3393}
3394
3395static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3396                                   struct qed_ptt *p_ptt,
3397                                   struct qed_vf_info *p_vf)
3398{
3399        u16 length = sizeof(struct pfvf_def_resp_tlv);
3400        u8 status = PFVF_STATUS_SUCCESS;
3401        int rc = 0;
3402
3403        qed_iov_vf_cleanup(p_hwfn, p_vf);
3404
3405        if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3406                /* Stopping the VF */
3407                rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3408                                    p_vf->opaque_fid);
3409
3410                if (rc) {
3411                        DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3412                               rc);
3413                        status = PFVF_STATUS_FAILURE;
3414                }
3415
3416                p_vf->state = VF_STOPPED;
3417        }
3418
3419        qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3420                             length, status);
3421}
3422
3423static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3424                                       struct qed_ptt *p_ptt,
3425                                       struct qed_vf_info *p_vf)
3426{
3427        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3428        struct pfvf_read_coal_resp_tlv *p_resp;
3429        struct vfpf_read_coal_req_tlv *req;
3430        u8 status = PFVF_STATUS_FAILURE;
3431        struct qed_vf_queue *p_queue;
3432        struct qed_queue_cid *p_cid;
3433        u16 coal = 0, qid, i;
3434        bool b_is_rx;
3435        int rc = 0;
3436
3437        mbx->offset = (u8 *)mbx->reply_virt;
3438        req = &mbx->req_virt->read_coal_req;
3439
3440        qid = req->qid;
3441        b_is_rx = !!req->is_rx;
3442
3443        if (b_is_rx) {
3444                if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3445                                          QED_IOV_VALIDATE_Q_ENABLE)) {
3446                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3447                                   "VF[%d]: Invalid Rx queue_id = %d\n",
3448                                   p_vf->abs_vf_id, qid);
3449                        goto send_resp;
3450                }
3451
3452                p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3453                rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3454                if (rc)
3455                        goto send_resp;
3456        } else {
3457                if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3458                                          QED_IOV_VALIDATE_Q_ENABLE)) {
3459                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3460                                   "VF[%d]: Invalid Tx queue_id = %d\n",
3461                                   p_vf->abs_vf_id, qid);
3462                        goto send_resp;
3463                }
3464                for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3465                        p_queue = &p_vf->vf_queues[qid];
3466                        if ((!p_queue->cids[i].p_cid) ||
3467                            (!p_queue->cids[i].b_is_tx))
3468                                continue;
3469
3470                        p_cid = p_queue->cids[i].p_cid;
3471
3472                        rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3473                        if (rc)
3474                                goto send_resp;
3475                        break;
3476                }
3477        }
3478
3479        status = PFVF_STATUS_SUCCESS;
3480
3481send_resp:
3482        p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3483                             sizeof(*p_resp));
3484        p_resp->coal = coal;
3485
3486        qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3487                    sizeof(struct channel_list_end_tlv));
3488
3489        qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3490}
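
/* Editorial note on the reply construction above: mbx->offset starts at
 * reply_virt, each qed_add_tlv() appends one TLV and advances the offset,
 * and a CHANNEL_TLV_LIST_END entry terminates the chain before the
 * response is sent back to the VF.
 */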
3491
3492static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3493                                       struct qed_ptt *p_ptt,
3494                                       struct qed_vf_info *vf)
3495{
3496        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3497        struct vfpf_update_coalesce *req;
3498        u8 status = PFVF_STATUS_FAILURE;
3499        struct qed_queue_cid *p_cid;
3500        u16 rx_coal, tx_coal;
3501        int rc = 0, i;
3502        u16 qid;
3503
3504        req = &mbx->req_virt->update_coalesce;
3505
3506        rx_coal = req->rx_coal;
3507        tx_coal = req->tx_coal;
3508        qid = req->qid;
3509
3510        if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3511                                  QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3512                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3513                           "VF[%d]: Invalid Rx queue_id = %d\n",
3514                           vf->abs_vf_id, qid);
3515                goto out;
3516        }
3517
3518        if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3519                                  QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3520                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3521                           "VF[%d]: Invalid Tx queue_id = %d\n",
3522                           vf->abs_vf_id, qid);
3523                goto out;
3524        }
3525
3526        DP_VERBOSE(p_hwfn,
3527                   QED_MSG_IOV,
3528                   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3529                   vf->abs_vf_id, rx_coal, tx_coal, qid);
3530
3531        if (rx_coal) {
3532                p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3533
3534                rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3535                if (rc) {
3536                        DP_VERBOSE(p_hwfn,
3537                                   QED_MSG_IOV,
3538                                   "VF[%d]: Unable to set rx queue = %d coalesce\n",
3539                                   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3540                        goto out;
3541                }
3542                vf->rx_coal = rx_coal;
3543        }
3544
3545        if (tx_coal) {
3546                struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3547
3548                for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3549                        if (!p_queue->cids[i].p_cid)
3550                                continue;
3551
3552                        if (!p_queue->cids[i].b_is_tx)
3553                                continue;
3554
3555                        rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3556                                                  p_queue->cids[i].p_cid);
3557
3558                        if (rc) {
3559                                DP_VERBOSE(p_hwfn,
3560                                           QED_MSG_IOV,
3561                                           "VF[%d]: Unable to set tx queue coalesce\n",
3562                                           vf->abs_vf_id);
3563                                goto out;
3564                        }
3565                }
3566                vf->tx_coal = tx_coal;
3567        }
3568
3569        status = PFVF_STATUS_SUCCESS;
3570out:
3571        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3572                             sizeof(struct pfvf_def_resp_tlv), status);
3573}

3574static int
3575qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3576                         struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3577{
3578        int cnt;
3579        u32 val;
3580
3581        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3582
3583        for (cnt = 0; cnt < 50; cnt++) {
3584                val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3585                if (!val)
3586                        break;
3587                msleep(20);
3588        }
3589        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3590
3591        if (cnt == 50) {
3592                DP_ERR(p_hwfn,
3593                       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3594                       p_vf->abs_vf_id, val);
3595                return -EBUSY;
3596        }
3597
3598        return 0;
3599}
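
/* Timing note (editorial): the poll above allows up to 50 * 20 ms, i.e.
 * roughly one second, for the DORQ usage counter to drain before giving
 * up with -EBUSY.
 */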
3600
3601static int
3602qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3603                        struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3604{
3605        u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3606        int i, cnt;
3607
3608        /* Read initial consumers & producers */
3609        for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3610                u32 prod;
3611
3612                cons[i] = qed_rd(p_hwfn, p_ptt,
3613                                 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3614                                 i * 0x40);
3615                prod = qed_rd(p_hwfn, p_ptt,
3616                              PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3617                              i * 0x40);
3618                distance[i] = prod - cons[i];
3619        }
3620
3621        /* Wait for consumers to pass the producers */
3622        i = 0;
3623        for (cnt = 0; cnt < 50; cnt++) {
3624                for (; i < MAX_NUM_VOQS_E4; i++) {
3625                        u32 tmp;
3626
3627                        tmp = qed_rd(p_hwfn, p_ptt,
3628                                     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3629                                     i * 0x40);
3630                        if (distance[i] > tmp - cons[i])
3631                                break;
3632                }
3633
3634                if (i == MAX_NUM_VOQS_E4)
3635                        break;
3636
3637                msleep(20);
3638        }
3639
3640        if (cnt == 50) {
3641                DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3642                       p_vf->abs_vf_id, i);
3643                return -EBUSY;
3644        }
3645
3646        return 0;
3647}
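
/* Editorial note on the polling math above: distance[i] is the initial
 * prod - cons gap per VOQ, and the loop waits until each consumer has
 * advanced by at least that much (tmp - cons[i] >= distance[i]). With all
 * counters held in u32, the subtraction stays correct even if the
 * hardware counters wrap.
 */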
3648
3649static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3650                               struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3651{
3652        int rc;
3653
3654        rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3655        if (rc)
3656                return rc;
3657
3658        rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3659        if (rc)
3660                return rc;
3661
3662        return 0;
3663}
3664
3665static int
3666qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3667                               struct qed_ptt *p_ptt,
3668                               u16 rel_vf_id, u32 *ack_vfs)
3669{
3670        struct qed_vf_info *p_vf;
3671        int rc = 0;
3672
3673        p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3674        if (!p_vf)
3675                return 0;
3676
3677        if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3678            (1ULL << (rel_vf_id % 64))) {
3679                u16 vfid = p_vf->abs_vf_id;
3680
3681                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3682                           "VF[%d] - Handling FLR\n", vfid);
3683
3684                qed_iov_vf_cleanup(p_hwfn, p_vf);
3685
3686                /* If VF isn't active, no need for anything but SW */
3687                if (!p_vf->b_init)
3688                        goto cleanup;
3689
3690                rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3691                if (rc)
3692                        goto cleanup;
3693
3694                rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3695                if (rc) {
3696                        DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3697                        return rc;
3698                }
3699
3700                /* Workaround to make VF-PF channel ready, as FW
3701                 * doesn't do that as a part of FLR.
3702                 */
3703                REG_WR(p_hwfn,
3704                       GTT_BAR0_MAP_REG_USDM_RAM +
3705                       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3706
3707                /* VF_STOPPED has to be set only after final cleanup
3708                 * but prior to re-enabling the VF.
3709                 */
3710                p_vf->state = VF_STOPPED;
3711
3712                rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3713                if (rc) {
3714                        DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3715                               vfid);
3716                        return rc;
3717                }
3718cleanup:
3719                /* Mark VF for ack and clean pending state */
3720                if (p_vf->state == VF_RESET)
3721                        p_vf->state = VF_STOPPED;
3722                ack_vfs[vfid / 32] |= BIT((vfid % 32));
3723                p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3724                    ~(1ULL << (rel_vf_id % 64));
3725                p_vf->vf_mbx.b_pending_msg = false;
3726        }
3727
3728        return rc;
3729}
3730
3731static int
3732qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3733{
3734        u32 ack_vfs[VF_MAX_STATIC / 32];
3735        int rc = 0;
3736        u16 i;
3737
3738        memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3739
3740        /* Since BRB <-> PRS interface can't be tested as part of the flr
3741         * polling due to HW limitations, simply sleep a bit. And since
3742         * there's no need to wait per-vf, do it before looping.
3743         */
3744        msleep(100);
3745
3746        for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3747                qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3748
3749        rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3750        return rc;
3751}
3752
3753bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3754{
3755        bool found = false;
3756        u16 i;
3757
3758        DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3759        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3760                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3761                           "[%08x,...,%08x]: %08x\n",
3762                           i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3763
3764        if (!p_hwfn->cdev->p_iov_info) {
3765                DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3766                return false;
3767        }
3768
3769        /* Mark VFs */
3770        for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3771                struct qed_vf_info *p_vf;
3772                u8 vfid;
3773
3774                p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3775                if (!p_vf)
3776                        continue;
3777
3778                vfid = p_vf->abs_vf_id;
3779                if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3780                        u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3781                        u16 rel_vf_id = p_vf->relative_vf_id;
3782
3783                        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3784                                   "VF[%d] [rel %d] got FLR-ed\n",
3785                                   vfid, rel_vf_id);
3786
3787                        p_vf->state = VF_RESET;
3788
3789                        /* No need to lock here, since pending_flr should
3790                         * only change between here and the MFW ACK. Since
3791                         * the MFW will not trigger an additional attention
3792                         * for VF FLR until we ACK, we're safe.
3793                         */
3794                        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3795                        found = true;
3796                }
3797        }
3798
3799        return found;
3800}
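
/* Minimal sketch (editorial, not upstream code) of the pending-FLR bitmap
 * arithmetic used above: relative VF N maps to bit (N % 64) of 64-bit
 * word (N / 64) in pf_iov_info->pending_flr.
 */
static inline bool qed_example_vf_flr_pending(u64 *p_pending, u16 rel_vf_id)
{
        return !!(p_pending[rel_vf_id / 64] & (1ULL << (rel_vf_id % 64)));
}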
3801
3802static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3803                             u16 vfid,
3804                             struct qed_mcp_link_params *p_params,
3805                             struct qed_mcp_link_state *p_link,
3806                             struct qed_mcp_link_capabilities *p_caps)
3807{
3808        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3809                                                       vfid,
3810                                                       false);
3811        struct qed_bulletin_content *p_bulletin;
3812
3813        if (!p_vf)
3814                return;
3815
3816        p_bulletin = p_vf->bulletin.p_virt;
3817
3818        if (p_params)
3819                __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3820        if (p_link)
3821                __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3822        if (p_caps)
3823                __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3824}
3825
3826static int
3827qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3828                                  struct qed_ptt *p_ptt,
3829                                  struct qed_vf_info *p_vf)
3830{
3831        struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3832        struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3833        struct vfpf_bulletin_update_mac_tlv *p_req;
3834        u8 status = PFVF_STATUS_SUCCESS;
3835        int rc = 0;
3836
3837        if (!p_vf->p_vf_info.is_trusted_configured) {
3838                DP_VERBOSE(p_hwfn,
3839                           QED_MSG_IOV,
3840                           "Blocking bulletin update request from untrusted VF[%d]\n",
3841                           p_vf->abs_vf_id);
3842                status = PFVF_STATUS_NOT_SUPPORTED;
3843                rc = -EINVAL;
3844                goto send_status;
3845        }
3846
3847        p_req = &mbx->req_virt->bulletin_update_mac;
3848        ether_addr_copy(p_bulletin->mac, p_req->mac);
3849        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3850                   "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3851                   p_vf->abs_vf_id, p_req->mac);
3852
3853send_status:
3854        qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3855                             CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3856                             sizeof(struct pfvf_def_resp_tlv), status);
3857        return rc;
3858}
3859
3860static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3861                                    struct qed_ptt *p_ptt, int vfid)
3862{
3863        struct qed_iov_vf_mbx *mbx;
3864        struct qed_vf_info *p_vf;
3865
3866        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3867        if (!p_vf)
3868                return;
3869
3870        mbx = &p_vf->vf_mbx;
3871
3872        /* qed_iov_process_mbx_request */
3873        if (!mbx->b_pending_msg) {
3874                DP_NOTICE(p_hwfn,
3875                          "VF[%02x]: Trying to process mailbox message when none is pending\n",
3876                          p_vf->abs_vf_id);
3877                return;
3878        }
3879        mbx->b_pending_msg = false;
3880
3881        mbx->first_tlv = mbx->req_virt->first_tlv;
3882
3883        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3884                   "VF[%02x]: Processing mailbox message [type %04x]\n",
3885                   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3886
3887        /* check if tlv type is known */
3888        if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3889            !p_vf->b_malicious) {
3890                switch (mbx->first_tlv.tl.type) {
3891                case CHANNEL_TLV_ACQUIRE:
3892                        qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3893                        break;
3894                case CHANNEL_TLV_VPORT_START:
3895                        qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3896                        break;
3897                case CHANNEL_TLV_VPORT_TEARDOWN:
3898                        qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3899                        break;
3900                case CHANNEL_TLV_START_RXQ:
3901                        qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3902                        break;
3903                case CHANNEL_TLV_START_TXQ:
3904                        qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3905                        break;
3906                case CHANNEL_TLV_STOP_RXQS:
3907                        qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3908                        break;
3909                case CHANNEL_TLV_STOP_TXQS:
3910                        qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3911                        break;
3912                case CHANNEL_TLV_UPDATE_RXQ:
3913                        qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3914                        break;
3915                case CHANNEL_TLV_VPORT_UPDATE:
3916                        qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3917                        break;
3918                case CHANNEL_TLV_UCAST_FILTER:
3919                        qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3920                        break;
3921                case CHANNEL_TLV_CLOSE:
3922                        qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3923                        break;
3924                case CHANNEL_TLV_INT_CLEANUP:
3925                        qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3926                        break;
3927                case CHANNEL_TLV_RELEASE:
3928                        qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3929                        break;
3930                case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3931                        qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3932                        break;
3933                case CHANNEL_TLV_COALESCE_UPDATE:
3934                        qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3935                        break;
3936                case CHANNEL_TLV_COALESCE_READ:
3937                        qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3938                        break;
3939                case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3940                        qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3941                        break;
3942                }
3943        } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3944                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3945                           "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3946                           p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3947
3948                qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3949                                     mbx->first_tlv.tl.type,
3950                                     sizeof(struct pfvf_def_resp_tlv),
3951                                     PFVF_STATUS_MALICIOUS);
3952        } else {
3953                /* unknown TLV - this may belong to a VF driver from the future
3954                 * - a version written after this PF driver was written, which
3955                 * supports features unknown as of yet. Too bad since we don't
3956                 * support them. Or this may be because someone wrote a crappy
3957                 * VF driver and is sending garbage over the channel.
3958                 */
3959                DP_NOTICE(p_hwfn,
3960                          "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3961                          p_vf->abs_vf_id,
3962                          mbx->first_tlv.tl.type,
3963                          mbx->first_tlv.tl.length,
3964                          mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3965
3966                /* Try replying in case reply address matches the acquisition's
3967                 * posted address.
3968                 */
3969                if (p_vf->acquire.first_tlv.reply_address &&
3970                    (mbx->first_tlv.reply_address ==
3971                     p_vf->acquire.first_tlv.reply_address)) {
3972                        qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3973                                             mbx->first_tlv.tl.type,
3974                                             sizeof(struct pfvf_def_resp_tlv),
3975                                             PFVF_STATUS_NOT_SUPPORTED);
3976                } else {
3977                        DP_VERBOSE(p_hwfn,
3978                                   QED_MSG_IOV,
3979                                   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3980                                   p_vf->abs_vf_id);
3981                }
3982        }
3983}
3984
3985static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3986{
3987        int i;
3988
3989        memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3990
3991        qed_for_each_vf(p_hwfn, i) {
3992                struct qed_vf_info *p_vf;
3993
3994                p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3995                if (p_vf->vf_mbx.b_pending_msg)
3996                        events[i / 64] |= 1ULL << (i % 64);
3997        }
3998}
3999
4000static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
4001                                                       u16 abs_vfid)
4002{
4003        u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
4004
4005        if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4006                DP_VERBOSE(p_hwfn,
4007                           QED_MSG_IOV,
4008                           "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4009                           abs_vfid);
4010                return NULL;
4011        }
4012
4013        return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
4014}
4015
4016static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
4017                              u16 abs_vfid, struct regpair *vf_msg)
4018{
4019        struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
4020                           abs_vfid);
4021
4022        if (!p_vf)
4023                return 0;
4024
4025        /* List the physical address of the request so that handler
4026         * could later on copy the message from it.
4027         */
4028        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4029
4030        /* Mark the event and schedule the workqueue */
4031        p_vf->vf_mbx.b_pending_msg = true;
4032        qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4033
4034        return 0;
4035}
4036
4037static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4038                                     struct malicious_vf_eqe_data *p_data)
4039{
4040        struct qed_vf_info *p_vf;
4041
4042        p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4043
4044        if (!p_vf)
4045                return;
4046
4047        if (!p_vf->b_malicious) {
4048                DP_NOTICE(p_hwfn,
4049                          "VF [%d] - Malicious behavior [%02x]\n",
4050                          p_vf->abs_vf_id, p_data->err_id);
4051
4052                p_vf->b_malicious = true;
4053        } else {
4054                DP_INFO(p_hwfn,
4055                        "VF [%d] - Malicious behavior [%02x]\n",
4056                        p_vf->abs_vf_id, p_data->err_id);
4057        }
4058}
4059
4060static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
4061                               u8 opcode,
4062                               __le16 echo,
4063                               union event_ring_data *data, u8 fw_return_code)
4064{
4065        switch (opcode) {
4066        case COMMON_EVENT_VF_PF_CHANNEL:
4067                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4068                                          &data->vf_pf_channel.msg_addr);
4069        case COMMON_EVENT_MALICIOUS_VF:
4070                qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4071                return 0;
4072        default:
4073                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4074                        opcode);
4075                return -EINVAL;
4076        }
4077}
4078
4079u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4080{
4081        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4082        u16 i;
4083
4084        if (!p_iov)
4085                goto out;
4086
4087        for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4088                if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4089                        return i;
4090
4091out:
4092        return MAX_NUM_VFS;
4093}
4094
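    /* DMA the VF's pending request from the address it posted over the
     * channel into the PF's per-VF request buffer.
     */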
4095static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4096                               int vfid)
4097{
4098        struct qed_dmae_params params;
4099        struct qed_vf_info *vf_info;
4100
4101        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4102        if (!vf_info)
4103                return -EINVAL;
4104
4105        memset(&params, 0, sizeof(struct qed_dmae_params));
4106        params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
4107        params.src_vfid = vf_info->abs_vf_id;
4108
4109        if (qed_dmae_host2host(p_hwfn, ptt,
4110                               vf_info->vf_mbx.pending_req,
4111                               vf_info->vf_mbx.req_phys,
4112                               sizeof(union vfpf_tlvs) / 4, &params)) {
4113                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4114                           "Failed to copy message from VF 0x%02x\n", vfid);
4115
4116                return -EIO;
4117        }
4118
4119        return 0;
4120}
4121
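    /* Publish a forced MAC in the VF's bulletin board. For a trusted
     * VF the address is posted as a regular (non-forced) MAC instead,
     * since trust mode and forced MAC are mutually exclusive.
     */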
4122static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4123                                            u8 *mac, int vfid)
4124{
4125        struct qed_vf_info *vf_info;
4126        u64 feature;
4127
4128        vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4129        if (!vf_info) {
4130                DP_NOTICE(p_hwfn->cdev,
4131                          "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4132                return;
4133        }
4134
4135        if (vf_info->b_malicious) {
4136                DP_NOTICE(p_hwfn->cdev,
4137                          "Can't set forced MAC to malicious VF [%d]\n", vfid);
4138                return;
4139        }
4140
4141        if (vf_info->p_vf_info.is_trusted_configured) {
4142                feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4143                /* Trust mode will disable Forced MAC */
4144                vf_info->bulletin.p_virt->valid_bitmap &=
4145                        ~BIT(MAC_ADDR_FORCED);
4146        } else {
4147                feature = BIT(MAC_ADDR_FORCED);
4148                /* Forced MAC will disable MAC_ADDR */
4149                vf_info->bulletin.p_virt->valid_bitmap &=
4150                        ~BIT(VFPF_BULLETIN_MAC_ADDR);
4151        }
4152
4153        memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4154
4155        vf_info->bulletin.p_virt->valid_bitmap |= feature;
4156
4157        qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4158}
4159
4160static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4161{
4162        struct qed_vf_info *vf_info;
4163        u64 feature;
4164
4165        vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4166        if (!vf_info) {
4167                DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4168                          vfid);
4169                return -EINVAL;
4170        }
4171
4172        if (vf_info->b_malicious) {
4173                DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4174                          vfid);
4175                return -EINVAL;
4176        }
4177
4178        if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4179                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4180                           "Can not set MAC, Forced MAC is configured\n");
4181                return -EINVAL;
4182        }
4183
4184        feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4185        ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4186
4187        vf_info->bulletin.p_virt->valid_bitmap |= feature;
4188
4189        if (vf_info->p_vf_info.is_trusted_configured)
4190                qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4191
4192        return 0;
4193}
4194
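    /* Publish a forced vlan (pvid) in the VF's bulletin board; a pvid
     * of zero removes the forced vlan.
     */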
4195static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4196                                             u16 pvid, int vfid)
4197{
4198        struct qed_vf_info *vf_info;
4199        u64 feature;
4200
4201        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4202        if (!vf_info) {
4203                DP_NOTICE(p_hwfn->cdev,
4204                          "Can not set forced vlan, invalid vfid [%d]\n", vfid);
4205                return;
4206        }
4207
4208        if (vf_info->b_malicious) {
4209                DP_NOTICE(p_hwfn->cdev,
4210                          "Can't set forced vlan to malicious VF [%d]\n", vfid);
4211                return;
4212        }
4213
4214        feature = BIT(VLAN_ADDR_FORCED);
4215        vf_info->bulletin.p_virt->pvid = pvid;
4216        if (pvid)
4217                vf_info->bulletin.p_virt->valid_bitmap |= feature;
4218        else
4219                vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4220
4221        qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4222}
4223
4224void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4225                                    int vfid, u16 vxlan_port, u16 geneve_port)
4226{
4227        struct qed_vf_info *vf_info;
4228
4229        vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4230        if (!vf_info) {
4231                DP_NOTICE(p_hwfn->cdev,
4232                          "Can not set udp ports, invalid vfid [%d]\n", vfid);
4233                return;
4234        }
4235
4236        if (vf_info->b_malicious) {
4237                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4238                           "Can not set udp ports to malicious VF [%d]\n",
4239                           vfid);
4240                return;
4241        }
4242
4243        vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4244        vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4245}
4246
4247static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4248{
4249        struct qed_vf_info *p_vf_info;
4250
4251        p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4252        if (!p_vf_info)
4253                return false;
4254
4255        return !!p_vf_info->vport_instance;
4256}
4257
4258static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4259{
4260        struct qed_vf_info *p_vf_info;
4261
4262        p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4263        if (!p_vf_info)
4264                return true;
4265
4266        return p_vf_info->state == VF_STOPPED;
4267}
4268
4269static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4270{
4271        struct qed_vf_info *vf_info;
4272
4273        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4274        if (!vf_info)
4275                return false;
4276
4277        return vf_info->spoof_chk;
4278}
4279
4280static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4281{
4282        struct qed_vf_info *vf;
4283        int rc = -EINVAL;
4284
4285        if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4286                DP_NOTICE(p_hwfn,
4287                          "SR-IOV sanity check failed, can't set spoofchk\n");
4288                goto out;
4289        }
4290
4291        vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4292        if (!vf)
4293                goto out;
4294
4295        if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4296                /* After VF VPORT start PF will configure spoof check */
4297                vf->req_spoofchk_val = val;
4298                rc = 0;
4299                goto out;
4300        }
4301
4302        rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4303
4304out:
4305        return rc;
4306}
4307
4308static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4309{
4310        struct qed_vf_info *p_vf;
4311
4312        p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4313        if (!p_vf || !p_vf->bulletin.p_virt)
4314                return NULL;
4315
4316        if (!(p_vf->bulletin.p_virt->valid_bitmap &
4317              BIT(VFPF_BULLETIN_MAC_ADDR)))
4318                return NULL;
4319
4320        return p_vf->bulletin.p_virt->mac;
4321}
4322
4323static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4324                                           u16 rel_vf_id)
4325{
4326        struct qed_vf_info *p_vf;
4327
4328        p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4329        if (!p_vf || !p_vf->bulletin.p_virt)
4330                return NULL;
4331
4332        if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4333                return NULL;
4334
4335        return p_vf->bulletin.p_virt->mac;
4336}
4337
4338static u16
4339qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4340{
4341        struct qed_vf_info *p_vf;
4342
4343        p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4344        if (!p_vf || !p_vf->bulletin.p_virt)
4345                return 0;
4346
4347        if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4348                return 0;
4349
4350        return p_vf->bulletin.p_virt->pvid;
4351}
4352
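    /* Configure the Tx rate limit [Mb/s] of the vport acquired by the
     * VF, relative to the link speed of the leading hwfn.
     */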
4353static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4354                                     struct qed_ptt *p_ptt, int vfid, int val)
4355{
4356        struct qed_mcp_link_state *p_link;
4357        struct qed_vf_info *vf;
4358        u8 abs_vp_id = 0;
4359        int rc;
4360
4361        vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4362        if (!vf)
4363                return -EINVAL;
4364
4365        rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4366        if (rc)
4367                return rc;
4368
4369        p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
4370
4371        return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
4372                                 p_link->speed);
4373}
4374
4375static int
4376qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4377{
4378        struct qed_vf_info *vf;
4379        u8 vport_id;
4380        int i;
4381
4382        for_each_hwfn(cdev, i) {
4383                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4384
4385                if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4386                        DP_NOTICE(p_hwfn,
4387                                  "SR-IOV sanity check failed, can't set min rate\n");
4388                        return -EINVAL;
4389                }
4390        }
4391
4392        vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
            if (!vf) {
                    DP_NOTICE(cdev, "Cannot configure min rate, VF is unknown\n");
                    return -EINVAL;
            }
4393        vport_id = vf->vport_id;
4394
4395        return qed_configure_vport_wfq(cdev, vport_id, rate);
4396}
4397
4398static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4399{
4400        struct qed_wfq_data *vf_vp_wfq;
4401        struct qed_vf_info *vf_info;
4402
4403        vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4404        if (!vf_info)
4405                return 0;
4406
4407        vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4408
4409        if (vf_vp_wfq->configured)
4410                return vf_vp_wfq->min_speed;
4411        else
4412                return 0;
4413}
4414
4415/**
4416 * qed_schedule_iov - schedules IOV task for VF and PF
4417 * @hwfn: hardware function pointer
4418 * @flag: IOV flag for VF/PF
4419 */
4420void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4421{
4422        smp_mb__before_atomic();
4423        set_bit(flag, &hwfn->iov_task_flags);
4424        smp_mb__after_atomic();
4425        DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4426        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4427}
4428
4429void qed_vf_start_iov_wq(struct qed_dev *cdev)
4430{
4431        int i;
4432
4433        for_each_hwfn(cdev, i)
4434            queue_delayed_work(cdev->hwfns[i].iov_wq,
4435                               &cdev->hwfns[i].iov_task, 0);
4436}
4437
4438int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4439{
4440        int i, j;
4441
4442        for_each_hwfn(cdev, i)
4443            if (cdev->hwfns[i].iov_wq)
4444                flush_workqueue(cdev->hwfns[i].iov_wq);
4445
4446        /* Mark VFs for disablement */
4447        qed_iov_set_vfs_to_disable(cdev, true);
4448
4449        if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4450                pci_disable_sriov(cdev->pdev);
4451
4452        if (cdev->recov_in_prog) {
4453                DP_VERBOSE(cdev,
4454                           QED_MSG_IOV,
4455                           "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4456                goto out;
4457        }
4458
4459        for_each_hwfn(cdev, i) {
4460                struct qed_hwfn *hwfn = &cdev->hwfns[i];
4461                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4462
4463                /* Failure to acquire the ptt in 100G creates an odd error
4464                 * where the first engine has already released IOV.
4465                 */
4466                if (!ptt) {
4467                        DP_ERR(hwfn, "Failed to acquire ptt\n");
4468                        return -EBUSY;
4469                }
4470
4471                /* Clean WFQ db and configure equal weight for all vports */
4472                qed_clean_wfq_db(hwfn, ptt);
4473
4474                qed_for_each_vf(hwfn, j) {
4475                        int k;
4476
4477                        if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4478                                continue;
4479
4480                        /* Wait until VF is disabled before releasing */
4481                        for (k = 0; k < 100; k++) {
4482                                if (!qed_iov_is_vf_stopped(hwfn, j))
4483                                        msleep(20);
4484                                else
4485                                        break;
4486                        }
4487
4488                        if (k < 100)
4489                                qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4490                                                          ptt, j);
4491                        else
4492                                DP_ERR(hwfn,
4493                                       "Timeout waiting for VF's FLR to end\n");
4494                }
4495
4496                qed_ptt_release(hwfn, ptt);
4497        }
4498out:
4499        qed_iov_set_vfs_to_disable(cdev, false);
4500
4501        return 0;
4502}
4503
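    /* Choose the queue ids a VF would use - an equal, contiguous slice
     * of the L2 queues that follow those reserved for the PF.
     */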
4504static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4505                                        u16 vfid,
4506                                        struct qed_iov_vf_init_params *params)
4507{
4508        u16 base, i;
4509
4510        /* Since we have an equal resource distribution per-VF, and we assume
4511         * PF has acquired the QED_PF_L2_QUE first queues, we start setting
4512         * sequentially from there.
4513         */
4514        base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4515
4516        params->rel_vf_id = vfid;
4517        for (i = 0; i < params->num_queues; i++) {
4518                params->req_rx_queue[i] = base + i;
4519                params->req_tx_queue[i] = base + i;
4520        }
4521}
4522
4523static int qed_sriov_enable(struct qed_dev *cdev, int num)
4524{
4525        struct qed_iov_vf_init_params params;
4526        struct qed_hwfn *hwfn;
4527        struct qed_ptt *ptt;
4528        int i, j, rc;
4529
4530        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4531                DP_NOTICE(cdev, "Can start at most %d VFs\n",
4532                          RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4533                return -EINVAL;
4534        }
4535
4536        memset(&params, 0, sizeof(params));
4537
4538        /* Initialize HW for VF access */
4539        for_each_hwfn(cdev, j) {
4540                hwfn = &cdev->hwfns[j];
4541                ptt = qed_ptt_acquire(hwfn);
4542
4543                /* Make sure not to use more than 16 queues per VF */
4544                params.num_queues = min_t(int,
4545                                          FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4546                                          16);
4547
4548                if (!ptt) {
4549                        DP_ERR(hwfn, "Failed to acquire ptt\n");
4550                        rc = -EBUSY;
4551                        goto err;
4552                }
4553
4554                for (i = 0; i < num; i++) {
4555                        if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4556                                continue;
4557
4558                        qed_sriov_enable_qid_config(hwfn, i, &params);
4559                        rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4560                        if (rc) {
4561                                DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4562                                qed_ptt_release(hwfn, ptt);
4563                                goto err;
4564                        }
4565                }
4566
4567                qed_ptt_release(hwfn, ptt);
4568        }
4569
4570        /* Enable SRIOV PCIe functions */
4571        rc = pci_enable_sriov(cdev->pdev, num);
4572        if (rc) {
4573                DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4574                goto err;
4575        }
4576
4577        hwfn = QED_LEADING_HWFN(cdev);
4578        ptt = qed_ptt_acquire(hwfn);
4579        if (!ptt) {
4580                DP_ERR(hwfn, "Failed to acquire ptt\n");
4581                rc = -EBUSY;
4582                goto err;
4583        }
4584
4585        rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4586        if (rc)
4587                DP_INFO(cdev, "Failed to update eswitch mode\n");
4588        qed_ptt_release(hwfn, ptt);
4589
4590        return num;
4591
4592err:
4593        qed_sriov_disable(cdev, false);
4594        return rc;
4595}
4596
4597static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4598{
4599        if (!IS_QED_SRIOV(cdev)) {
4600                DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4601                return -EOPNOTSUPP;
4602        }
4603
4604        if (num_vfs_param)
4605                return qed_sriov_enable(cdev, num_vfs_param);
4606        else
4607                return qed_sriov_disable(cdev, true);
4608}
4609
4610static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4611{
4612        int i;
4613
4614        if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4615                DP_VERBOSE(cdev, QED_MSG_IOV,
4616                           "Cannot set a VF MAC; SR-IOV is not enabled\n");
4617                return -EINVAL;
4618        }
4619
4620        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4621                DP_VERBOSE(cdev, QED_MSG_IOV,
4622                           "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4623                return -EINVAL;
4624        }
4625
4626        for_each_hwfn(cdev, i) {
4627                struct qed_hwfn *hwfn = &cdev->hwfns[i];
4628                struct qed_public_vf_info *vf_info;
4629
4630                vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4631                if (!vf_info)
4632                        continue;
4633
4634                /* Set the MAC, and schedule the IOV task */
4635                if (vf_info->is_trusted_configured)
4636                        ether_addr_copy(vf_info->mac, mac);
4637                else
4638                        ether_addr_copy(vf_info->forced_mac, mac);
4639
4640                qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4641        }
4642
4643        return 0;
4644}
4645
4646static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4647{
4648        int i;
4649
4650        if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4651                DP_VERBOSE(cdev, QED_MSG_IOV,
4652                           "Cannot set a VF vlan; SR-IOV is not enabled\n");
4653                return -EINVAL;
4654        }
4655
4656        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4657                DP_VERBOSE(cdev, QED_MSG_IOV,
4658                           "Cannot set VF[%d] vlan (VF is not active)\n", vfid);
4659                return -EINVAL;
4660        }
4661
4662        for_each_hwfn(cdev, i) {
4663                struct qed_hwfn *hwfn = &cdev->hwfns[i];
4664                struct qed_public_vf_info *vf_info;
4665
4666                vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4667                if (!vf_info)
4668                        continue;
4669
4670                /* Set the forced vlan, and schedule the IOV task */
4671                vf_info->forced_vlan = vid;
4672                qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4673        }
4674
4675        return 0;
4676}
4677
4678static int qed_get_vf_config(struct qed_dev *cdev,
4679                             int vf_id, struct ifla_vf_info *ivi)
4680{
4681        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4682        struct qed_public_vf_info *vf_info;
4683        struct qed_mcp_link_state link;
4684        u32 tx_rate;
4685
4686        /* Sanitize request */
4687        if (IS_VF(cdev))
4688                return -EINVAL;
4689
4690        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4691                DP_VERBOSE(cdev, QED_MSG_IOV,
4692                           "VF index [%d] isn't active\n", vf_id);
4693                return -EINVAL;
4694        }
4695
4696        vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4697
4698        qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4699
4700        /* Fill information about VF */
4701        ivi->vf = vf_id;
4702
4703        if (is_valid_ether_addr(vf_info->forced_mac))
4704                ether_addr_copy(ivi->mac, vf_info->forced_mac);
4705        else
4706                ether_addr_copy(ivi->mac, vf_info->mac);
4707
4708        ivi->vlan = vf_info->forced_vlan;
4709        ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4710        ivi->linkstate = vf_info->link_state;
4711        tx_rate = vf_info->tx_rate;
4712        ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4713        ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4714
4715        return 0;
4716}
4717
4718void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4719{
4720        struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4721        struct qed_mcp_link_capabilities caps;
4722        struct qed_mcp_link_params params;
4723        struct qed_mcp_link_state link;
4724        int i;
4725
4726        if (!hwfn->pf_iov_info)
4727                return;
4728
4729        /* Update bulletin of all future possible VFs with link configuration */
4730        for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4731                struct qed_public_vf_info *vf_info;
4732
4733                vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4734                if (!vf_info)
4735                        continue;
4736
4737                /* Only hwfn0 is actually interested in the link speed.
4738                 * But since only it receives MFW link indications, the
4739                 * configuration must be taken from it - otherwise things
4740                 * like rate limiting for hwfn1 VFs would not work.
4741                 */
4742                memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4743                       sizeof(params));
4744                memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4745                memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4746                       sizeof(caps));
4747
4748                /* Modify link according to the VF's configured link state */
4749                switch (vf_info->link_state) {
4750                case IFLA_VF_LINK_STATE_DISABLE:
4751                        link.link_up = false;
4752                        break;
4753                case IFLA_VF_LINK_STATE_ENABLE:
4754                        link.link_up = true;
4755                        /* Set speed according to maximum supported by HW;
4756                         * that is 40G for regular devices and 100G for CMT
4757                         * mode devices.
4758                         */
4759                        link.speed = (hwfn->cdev->num_hwfns > 1) ?
4760                                     100000 : 40000;
                            break;
4761                default:
4762                        /* In auto mode pass PF link image to VF */
4763                        break;
4764                }
4765
4766                if (link.link_up && vf_info->tx_rate) {
4767                        struct qed_ptt *ptt;
4768                        int rate;
4769
4770                        rate = min_t(int, vf_info->tx_rate, link.speed);
4771
4772                        ptt = qed_ptt_acquire(hwfn);
4773                        if (!ptt) {
4774                                DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4775                                return;
4776                        }
4777
4778                        if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4779                                vf_info->tx_rate = rate;
4780                                link.speed = rate;
4781                        }
4782
4783                        qed_ptt_release(hwfn, ptt);
4784                }
4785
4786                qed_iov_set_link(hwfn, i, &params, &link, &caps);
4787        }
4788
4789        qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4790}
4791
4792static int qed_set_vf_link_state(struct qed_dev *cdev,
4793                                 int vf_id, int link_state)
4794{
4795        int i;
4796
4797        /* Sanitize request */
4798        if (IS_VF(cdev))
4799                return -EINVAL;
4800
4801        if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4802                DP_VERBOSE(cdev, QED_MSG_IOV,
4803                           "VF index [%d] isn't active\n", vf_id);
4804                return -EINVAL;
4805        }
4806
4807        /* Handle configuration of link state */
4808        for_each_hwfn(cdev, i) {
4809                struct qed_hwfn *hwfn = &cdev->hwfns[i];
4810                struct qed_public_vf_info *vf;
4811
4812                vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4813                if (!vf)
4814                        continue;
4815
4816                if (vf->link_state == link_state)
4817                        continue;
4818
4819                vf->link_state = link_state;
4820                qed_inform_vf_link_state(&cdev->hwfns[i]);
4821        }
4822
4823        return 0;
4824}
4825
4826static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4827{
4828        int i, rc = -EINVAL;
4829
4830        for_each_hwfn(cdev, i) {
4831                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4832
4833                rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4834                if (rc)
4835                        break;
4836        }
4837
4838        return rc;
4839}
4840
4841static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4842{
4843        int i;
4844
4845        for_each_hwfn(cdev, i) {
4846                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4847                struct qed_public_vf_info *vf;
4848
4849                if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4850                        DP_NOTICE(p_hwfn,
4851                                  "SR-IOV sanity check failed, can't set tx rate\n");
4852                        return -EINVAL;
4853                }
4854
4855                vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4856
4857                vf->tx_rate = rate;
4858
4859                qed_inform_vf_link_state(p_hwfn);
4860        }
4861
4862        return 0;
4863}
4864
4865static int qed_set_vf_rate(struct qed_dev *cdev,
4866                           int vfid, u32 min_rate, u32 max_rate)
4867{
4868        int rc_min = 0, rc_max = 0;
4869
4870        if (max_rate)
4871                rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4872
4873        if (min_rate)
4874                rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4875
4876        if (rc_max || rc_min)
4877                return -EINVAL;
4878
4879        return 0;
4880}
4881
4882static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4883{
4884        int i;
4885
4886        for_each_hwfn(cdev, i) {
4887                struct qed_hwfn *hwfn = &cdev->hwfns[i];
4888                struct qed_public_vf_info *vf;
4889
4890                if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4891                        DP_NOTICE(hwfn,
4892                                  "SR-IOV sanity check failed, can't set trust\n");
4893                        return -EINVAL;
4894                }
4895
4896                vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4897
4898                if (vf->is_trusted_request == trust)
4899                        return 0;
4900                vf->is_trusted_request = trust;
4901
4902                qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4903        }
4904
4905        return 0;
4906}
4907
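    /* IOV workqueue handler for QED_IOV_WQ_MSG_FLAG - copy and process
     * the mailbox request of each VF with a pending message.
     */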
4908static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4909{
4910        u64 events[QED_VF_ARRAY_LENGTH];
4911        struct qed_ptt *ptt;
4912        int i;
4913
4914        ptt = qed_ptt_acquire(hwfn);
4915        if (!ptt) {
4916                DP_VERBOSE(hwfn, QED_MSG_IOV,
4917                           "Can't acquire PTT; re-scheduling\n");
4918                qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4919                return;
4920        }
4921
4922        qed_iov_pf_get_pending_events(hwfn, events);
4923
4924        DP_VERBOSE(hwfn, QED_MSG_IOV,
4925                   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4926                   events[0], events[1], events[2]);
4927
4928        qed_for_each_vf(hwfn, i) {
4929                /* Skip VFs with no pending messages */
4930                if (!(events[i / 64] & (1ULL << (i % 64))))
4931                        continue;
4932
4933                DP_VERBOSE(hwfn, QED_MSG_IOV,
4934                           "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4935                           i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4936
4937                /* Copy VF's message to PF's request buffer for that VF */
4938                if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4939                        continue;
4940
4941                qed_iov_process_mbx_req(hwfn, ptt, i);
4942        }
4943
4944        qed_ptt_release(hwfn, ptt);
4945}
4946
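    /* Return true if a valid PF-requested MAC differs from @mac, the
     * one currently published in the bulletin - i.e., the bulletin
     * needs to be updated.
     */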
4947static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4948                                       u8 *mac,
4949                                       struct qed_public_vf_info *info)
4950{
4951        if (info->is_trusted_configured) {
4952                if (is_valid_ether_addr(info->mac) &&
4953                    (!mac || !ether_addr_equal(mac, info->mac)))
4954                        return true;
4955        } else {
4956                if (is_valid_ether_addr(info->forced_mac) &&
4957                    (!mac || !ether_addr_equal(mac, info->forced_mac)))
4958                        return true;
4959        }
4960
4961        return false;
4962}
4963
4964static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4965                                 struct qed_public_vf_info *info,
4966                                 int vfid)
4967{
4968        if (info->is_trusted_configured)
4969                qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4970        else
4971                qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4972}
4973
4974static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4975{
4976        int i;
4977
4978        qed_for_each_vf(hwfn, i) {
4979                struct qed_public_vf_info *info;
4980                bool update = false;
4981                u8 *mac;
4982
4983                info = qed_iov_get_public_vf_info(hwfn, i, true);
4984                if (!info)
4985                        continue;
4986
4987                /* Update data on bulletin board */
4988                if (info->is_trusted_configured)
4989                        mac = qed_iov_bulletin_get_mac(hwfn, i);
4990                else
4991                        mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4992
4993                if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4994                        DP_VERBOSE(hwfn,
4995                                   QED_MSG_IOV,
4996                                   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4997                                   i,
4998                                   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4999
5000                        /* Update bulletin board with MAC */
5001                        qed_set_bulletin_mac(hwfn, info, i);
5002                        update = true;
5003                }
5004
5005                if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
5006                    info->forced_vlan) {
5007                        DP_VERBOSE(hwfn,
5008                                   QED_MSG_IOV,
5009                                   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
5010                                   info->forced_vlan,
5011                                   i,
5012                                   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5013                        qed_iov_bulletin_set_forced_vlan(hwfn,
5014                                                         info->forced_vlan, i);
5015                        update = true;
5016                }
5017
5018                if (update)
5019                        qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5020        }
5021}
5022
5023static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5024{
5025        struct qed_ptt *ptt;
5026        int i;
5027
5028        ptt = qed_ptt_acquire(hwfn);
5029        if (!ptt) {
5030                DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5031                qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5032                return;
5033        }
5034
5035        qed_for_each_vf(hwfn, i)
5036            qed_iov_post_vf_bulletin(hwfn, i, ptt);
5037
5038        qed_ptt_release(hwfn, ptt);
5039}
5040
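    /* Reconcile a VF's MAC configuration when its trust mode flips:
     * turning trust on demotes a forced MAC to a regular bulletin MAC,
     * while turning it off re-registers the VF MAC in the shadow
     * configuration and clears the bulletin.
     */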
5041static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5042{
5043        struct qed_public_vf_info *vf_info;
5044        struct qed_vf_info *vf;
5045        u8 *force_mac;
5046        int i;
5047
5048        vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5049        vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5050
5051        if (!vf_info || !vf)
5052                return;
5053
5054        /* A forced MAC is converted to a generic MAC when VF trust is on */
5055        if (vf_info->is_trusted_configured &&
5056            (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5057                force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5058
5059                if (force_mac) {
5060                        /* Clear existing shadow copy of MAC to have a clean
5061                         * slate.
5062                         */
5063                        for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5064                                if (ether_addr_equal(vf->shadow_config.macs[i],
5065                                                     vf_info->mac)) {
5066                                        memset(vf->shadow_config.macs[i], 0,
5067                                               ETH_ALEN);
5068                                        DP_VERBOSE(hwfn, QED_MSG_IOV,
5069                                                   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5070                                                    vf_info->mac, vf_id);
5071                                        break;
5072                                }
5073                        }
5074
5075                        ether_addr_copy(vf_info->mac, force_mac);
5076                        memset(vf_info->forced_mac, 0, ETH_ALEN);
5077                        vf->bulletin.p_virt->valid_bitmap &=
5078                                        ~BIT(MAC_ADDR_FORCED);
5079                        qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5080                }
5081        }
5082
5083        /* Update shadow copy with VF MAC when trust mode is turned off */
5084        if (!vf_info->is_trusted_configured) {
5085                u8 empty_mac[ETH_ALEN];
5086
5087                memset(empty_mac, 0, ETH_ALEN);
5088                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5089                        if (ether_addr_equal(vf->shadow_config.macs[i],
5090                                             empty_mac)) {
5091                                ether_addr_copy(vf->shadow_config.macs[i],
5092                                                vf_info->mac);
5093                                DP_VERBOSE(hwfn, QED_MSG_IOV,
5094                                           "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5095                                            vf_info->mac, vf_id);
5096                                break;
5097                        }
5098                }
5099                /* Clear bulletin when trust mode is turned off,
5100                 * to have a clean slate for next (normal) operations.
5101                 */
5102                qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5103                qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5104        }
5105}
5106
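    /* IOV workqueue handler for QED_IOV_WQ_TRUST_FLAG - apply a changed
     * trust setting by updating the MAC configuration and the vport's
     * accept flags and control-frame checks.
     */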
5107static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5108{
5109        struct qed_sp_vport_update_params params;
5110        struct qed_filter_accept_flags *flags;
5111        struct qed_public_vf_info *vf_info;
5112        struct qed_vf_info *vf;
5113        u8 mask;
5114        int i;
5115
5116        mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5117        flags = &params.accept_flags;
5118
5119        qed_for_each_vf(hwfn, i) {
5120                /* Make sure the currently requested configuration didn't
5121                 * flip, so that we don't end up configuring something
5122                 * that's not needed.
5123                 */
5124                vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
                    if (!vf_info)
                            continue;
5125                if (vf_info->is_trusted_configured ==
5126                    vf_info->is_trusted_request)
5127                        continue;
5128                vf_info->is_trusted_configured = vf_info->is_trusted_request;
5129
5130                /* Handle forced MAC mode */
5131                qed_update_mac_for_vf_trust_change(hwfn, i);
5132
5133                /* Validate that the VF has a configured vport */
5134                vf = qed_iov_get_vf_info(hwfn, i, true);
5135                if (!vf || !vf->vport_instance)
5136                        continue;
5137
5138                memset(&params, 0, sizeof(params));
5139                params.opaque_fid = vf->opaque_fid;
5140                params.vport_id = vf->vport_id;
5141
5142                params.update_ctl_frame_check = 1;
5143                params.mac_chk_en = !vf_info->is_trusted_configured;
5144
5145                if (vf_info->rx_accept_mode & mask) {
5146                        flags->update_rx_mode_config = 1;
5147                        flags->rx_accept_filter = vf_info->rx_accept_mode;
5148                }
5149
5150                if (vf_info->tx_accept_mode & mask) {
5151                        flags->update_tx_mode_config = 1;
5152                        flags->tx_accept_filter = vf_info->tx_accept_mode;
5153                }
5154
5155                /* Remove if needed; Otherwise this would set the mask */
5156                if (!vf_info->is_trusted_configured) {
5157                        flags->rx_accept_filter &= ~mask;
5158                        flags->tx_accept_filter &= ~mask;
5159                }
5160
5161                if (flags->update_rx_mode_config ||
5162                    flags->update_tx_mode_config ||
5163                    params.update_ctl_frame_check)
5164                        qed_sp_vport_update(hwfn, &params,
5165                                            QED_SPQ_MODE_EBLOCK, NULL);
5166        }
5167}
5168
5169static void qed_iov_pf_task(struct work_struct *work)
5170{
5172        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5173                                             iov_task.work);
5174        int rc;
5175
5176        if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5177                return;
5178
5179        if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5180                struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5181
5182                if (!ptt) {
5183                        qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5184                        return;
5185                }
5186
5187                rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5188                if (rc)
5189                        qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5190
5191                qed_ptt_release(hwfn, ptt);
5192        }
5193
5194        if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5195                qed_handle_vf_msg(hwfn);
5196
5197        if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5198                               &hwfn->iov_task_flags))
5199                qed_handle_pf_set_vf_unicast(hwfn);
5200
5201        if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5202                               &hwfn->iov_task_flags))
5203                qed_handle_bulletin_post(hwfn);
5204
5205        if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5206                qed_iov_handle_trust_change(hwfn);
5207}
5208
5209void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5210{
5211        int i;
5212
5213        for_each_hwfn(cdev, i) {
5214                if (!cdev->hwfns[i].iov_wq)
5215                        continue;
5216
5217                if (schedule_first) {
5218                        qed_schedule_iov(&cdev->hwfns[i],
5219                                         QED_IOV_WQ_STOP_WQ_FLAG);
5220                        cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5221                }
5222
5223                flush_workqueue(cdev->hwfns[i].iov_wq);
5224                destroy_workqueue(cdev->hwfns[i].iov_wq);
5225        }
5226}
5227
5228int qed_iov_wq_start(struct qed_dev *cdev)
5229{
5230        char name[NAME_SIZE];
5231        int i;
5232
5233        for_each_hwfn(cdev, i) {
5234                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5235
5236                /* PFs need a dedicated workqueue only if they support IOV.
5237                 * VFs always require one.
5238                 */
5239                if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5240                        continue;
5241
5242                snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5243                         cdev->pdev->bus->number,
5244                         PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5245
5246                p_hwfn->iov_wq = create_singlethread_workqueue(name);
5247                if (!p_hwfn->iov_wq) {
5248                        DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5249                        return -ENOMEM;
5250                }
5251
5252                if (IS_PF(cdev))
5253                        INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5254                else
5255                        INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5256        }
5257
5258        return 0;
5259}
5260
5261const struct qed_iov_hv_ops qed_iov_ops_pass = {
5262        .configure = &qed_sriov_configure,
5263        .set_mac = &qed_sriov_pf_set_mac,
5264        .set_vlan = &qed_sriov_pf_set_vlan,
5265        .get_config = &qed_get_vf_config,
5266        .set_link_state = &qed_set_vf_link_state,
5267        .set_spoof = &qed_spoof_configure,
5268        .set_rate = &qed_set_vf_rate,
5269        .set_trust = &qed_set_vf_trust,
5270};
5271