linux/drivers/net/ethernet/cavium/thunder/nicvf_main.c
   1/*
   2 * Copyright (C) 2015 Cavium, Inc.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of version 2 of the GNU General Public License
   6 * as published by the Free Software Foundation.
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/interrupt.h>
  11#include <linux/pci.h>
  12#include <linux/netdevice.h>
  13#include <linux/if_vlan.h>
  14#include <linux/etherdevice.h>
  15#include <linux/ethtool.h>
  16#include <linux/log2.h>
  17#include <linux/prefetch.h>
  18#include <linux/irq.h>
  19#include <linux/iommu.h>
  20#include <linux/bpf.h>
  21#include <linux/bpf_trace.h>
  22#include <linux/filter.h>
  23#include <linux/net_tstamp.h>
  24#include <linux/workqueue.h>
  25
  26#include "nic_reg.h"
  27#include "nic.h"
  28#include "nicvf_queues.h"
  29#include "thunder_bgx.h"
  30#include "../common/cavium_ptp.h"
  31
  32#define DRV_NAME        "nicvf"
  33#define DRV_VERSION     "1.0"
  34
  35/* Supported devices */
  36static const struct pci_device_id nicvf_id_table[] = {
  37        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
  38                         PCI_DEVICE_ID_THUNDER_NIC_VF,
  39                         PCI_VENDOR_ID_CAVIUM,
  40                         PCI_SUBSYS_DEVID_88XX_NIC_VF) },
  41        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
  42                         PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
  43                         PCI_VENDOR_ID_CAVIUM,
  44                         PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
  45        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
  46                         PCI_DEVICE_ID_THUNDER_NIC_VF,
  47                         PCI_VENDOR_ID_CAVIUM,
  48                         PCI_SUBSYS_DEVID_81XX_NIC_VF) },
  49        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
  50                         PCI_DEVICE_ID_THUNDER_NIC_VF,
  51                         PCI_VENDOR_ID_CAVIUM,
  52                         PCI_SUBSYS_DEVID_83XX_NIC_VF) },
  53        { 0, }  /* end of table */
  54};
  55
  56MODULE_AUTHOR("Sunil Goutham");
  57MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
  58MODULE_LICENSE("GPL v2");
  59MODULE_VERSION(DRV_VERSION);
  60MODULE_DEVICE_TABLE(pci, nicvf_id_table);
  61
  62static int debug = 0x00;
  63module_param(debug, int, 0644);
  64MODULE_PARM_DESC(debug, "Debug message level bitmap");
  65
  66static int cpi_alg = CPI_ALG_NONE;
  67module_param(cpi_alg, int, 0444);
  68MODULE_PARM_DESC(cpi_alg,
  69                 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
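
/* Usage sketch (illustrative, not part of the original source): both module
 * parameters above can be given at load time, e.g. to enable verbose logging
 * and VLAN based CPI classification:
 *
 *        modprobe nicvf debug=0xffff cpi_alg=1
 */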
  70
  71/* workqueue for handling kernel ndo_set_rx_mode() calls */
  72static struct workqueue_struct *nicvf_rx_mode_wq;
  73
  74static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
  75{
  76        if (nic->sqs_mode)
  77                return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
  78        else
  79                return qidx;
  80}
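
/* Worked example (illustrative, not part of the original source): assuming
 * MAX_CMP_QUEUES_PER_QS is 8, queue 2 of the secondary Qset with sqs_id 0
 * maps to netdev queue (0 + 1) * 8 + 2 = 10, while on the primary Qset
 * (sqs_mode == 0) the mapping is the identity:
 *
 *        rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
 *        skb_record_rx_queue(skb, rq_idx);
 */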
  81
  82/* The Cavium ThunderX network controller can *only* be found in SoCs
  83 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
  84 * registers on this platform are implicitly strongly ordered with respect
  85 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
  86 * with no memory barriers in this driver.  The readq()/writeq() functions add
  87 * explicit ordering operations which in this case are redundant, and only
  88 * add overhead.
  89 */
  90
  91/* Register read/write APIs */
  92void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
  93{
  94        writeq_relaxed(val, nic->reg_base + offset);
  95}
  96
  97u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
  98{
  99        return readq_relaxed(nic->reg_base + offset);
 100}
 101
 102void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
 103                           u64 qidx, u64 val)
 104{
 105        void __iomem *addr = nic->reg_base + offset;
 106
 107        writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
 108}
 109
 110u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
 111{
 112        void __iomem *addr = nic->reg_base + offset;
 113
 114        return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
 115}
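
/* Usage sketch (illustrative only): the per-queue helpers above fold the
 * queue index into the register address via NIC_Q_NUM_SHIFT, so ringing
 * the doorbell of completion queue 0 and reading back its status becomes:
 *
 *        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 0, processed_cqe);
 *        status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, 0);
 */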
 116
 117/* VF -> PF mailbox communication */
 118static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
 119{
 120        u64 *msg = (u64 *)mbx;
 121
 122        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
 123        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
 124}
 125
 126int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 127{
 128        int timeout = NIC_MBOX_MSG_TIMEOUT;
 129        int sleep = 10;
 130
 131        nic->pf_acked = false;
 132        nic->pf_nacked = false;
 133
 134        nicvf_write_to_mbx(nic, mbx);
 135
 136        /* Wait for the above message to be ACKed, timeout 2sec */
 137        while (!nic->pf_acked) {
 138                if (nic->pf_nacked) {
 139                        netdev_err(nic->netdev,
 140                                   "PF NACK to mbox msg 0x%02x from VF%d\n",
 141                                   (mbx->msg.msg & 0xFF), nic->vf_id);
 142                        return -EINVAL;
 143                }
 144                msleep(sleep);
 145                if (nic->pf_acked)
 146                        break;
 147                timeout -= sleep;
 148                if (!timeout) {
 149                        netdev_err(nic->netdev,
 150                                   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
 151                                   (mbx->msg.msg & 0xFF), nic->vf_id);
 152                        return -EBUSY;
 153                }
 154        }
 155        return 0;
 156}
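
/* Caller sketch (illustrative; mirrors nicvf_update_hw_max_frs() further
 * down): fill one view of union nic_mbx, send it and treat a NACK or a
 * timeout as failure:
 *
 *        union nic_mbx mbx = {};
 *
 *        mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
 *        mbx.frs.max_frs = mtu;
 *        mbx.frs.vf_id = nic->vf_id;
 *        return nicvf_send_msg_to_pf(nic, &mbx);
 */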
 157
 158/* Checks if VF is able to communicate with PF
 159 * and also gets the VNIC number this VF is associated to.
 160 */
 161static int nicvf_check_pf_ready(struct nicvf *nic)
 162{
 163        union nic_mbx mbx = {};
 164
 165        mbx.msg.msg = NIC_MBOX_MSG_READY;
 166        if (nicvf_send_msg_to_pf(nic, &mbx)) {
 167                netdev_err(nic->netdev,
 168                           "PF didn't respond to READY msg\n");
 169                return 0;
 170        }
 171
 172        return 1;
 173}
 174
 175static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
 176{
 177        if (bgx->rx)
 178                nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
 179        else
 180                nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
 181}
 182
 183static void nicvf_handle_mbx_intr(struct nicvf *nic)
 184{
 185        union nic_mbx mbx = {};
 186        u64 *mbx_data;
 187        u64 mbx_addr;
 188        int i;
 189
 190        mbx_addr = NIC_VF_PF_MAILBOX_0_1;
 191        mbx_data = (u64 *)&mbx;
 192
 193        for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
 194                *mbx_data = nicvf_reg_read(nic, mbx_addr);
 195                mbx_data++;
 196                mbx_addr += sizeof(u64);
 197        }
 198
 199        netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
 200        switch (mbx.msg.msg) {
 201        case NIC_MBOX_MSG_READY:
 202                nic->pf_acked = true;
 203                nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
 204                nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
 205                nic->node = mbx.nic_cfg.node_id;
 206                if (!nic->set_mac_pending)
 207                        ether_addr_copy(nic->netdev->dev_addr,
 208                                        mbx.nic_cfg.mac_addr);
 209                nic->sqs_mode = mbx.nic_cfg.sqs_mode;
 210                nic->loopback_supported = mbx.nic_cfg.loopback_supported;
 211                nic->link_up = false;
 212                nic->duplex = 0;
 213                nic->speed = 0;
 214                break;
 215        case NIC_MBOX_MSG_ACK:
 216                nic->pf_acked = true;
 217                break;
 218        case NIC_MBOX_MSG_NACK:
 219                nic->pf_nacked = true;
 220                break;
 221        case NIC_MBOX_MSG_RSS_SIZE:
 222                nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
 223                nic->pf_acked = true;
 224                break;
 225        case NIC_MBOX_MSG_BGX_STATS:
 226                nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
 227                nic->pf_acked = true;
 228                break;
 229        case NIC_MBOX_MSG_BGX_LINK_CHANGE:
 230                nic->pf_acked = true;
 231                nic->link_up = mbx.link_status.link_up;
 232                nic->duplex = mbx.link_status.duplex;
 233                nic->speed = mbx.link_status.speed;
 234                nic->mac_type = mbx.link_status.mac_type;
 235                if (nic->link_up) {
 236                        netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
 237                                    nic->speed,
 238                                    nic->duplex == DUPLEX_FULL ?
 239                                    "Full" : "Half");
 240                        netif_carrier_on(nic->netdev);
 241                        netif_tx_start_all_queues(nic->netdev);
 242                } else {
 243                        netdev_info(nic->netdev, "Link is Down\n");
 244                        netif_carrier_off(nic->netdev);
 245                        netif_tx_stop_all_queues(nic->netdev);
 246                }
 247                break;
 248        case NIC_MBOX_MSG_ALLOC_SQS:
 249                nic->sqs_count = mbx.sqs_alloc.qs_count;
 250                nic->pf_acked = true;
 251                break;
 252        case NIC_MBOX_MSG_SNICVF_PTR:
 253                /* Primary VF: make note of secondary VF's pointer
 254                 * to be used while packet transmission.
 255                 */
 256                nic->snicvf[mbx.nicvf.sqs_id] =
 257                        (struct nicvf *)mbx.nicvf.nicvf;
 258                nic->pf_acked = true;
 259                break;
 260        case NIC_MBOX_MSG_PNICVF_PTR:
 261                /* Secondary VF/Qset: make note of primary VF's pointer
 262                 * to be used while packet reception, to handover packet
 263                 * to primary VF's netdev.
 264                 */
 265                nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
 266                nic->pf_acked = true;
 267                break;
 268        case NIC_MBOX_MSG_PFC:
 269                nic->pfc.autoneg = mbx.pfc.autoneg;
 270                nic->pfc.fc_rx = mbx.pfc.fc_rx;
 271                nic->pfc.fc_tx = mbx.pfc.fc_tx;
 272                nic->pf_acked = true;
 273                break;
 274        default:
 275                netdev_err(nic->netdev,
 276                           "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
 277                break;
 278        }
 279        nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
 280}
 281
 282static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
 283{
 284        union nic_mbx mbx = {};
 285
 286        mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
 287        mbx.mac.vf_id = nic->vf_id;
 288        ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
 289
 290        return nicvf_send_msg_to_pf(nic, &mbx);
 291}
 292
 293static void nicvf_config_cpi(struct nicvf *nic)
 294{
 295        union nic_mbx mbx = {};
 296
 297        mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
 298        mbx.cpi_cfg.vf_id = nic->vf_id;
 299        mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
 300        mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
 301
 302        nicvf_send_msg_to_pf(nic, &mbx);
 303}
 304
 305static void nicvf_get_rss_size(struct nicvf *nic)
 306{
 307        union nic_mbx mbx = {};
 308
 309        mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
 310        mbx.rss_size.vf_id = nic->vf_id;
 311        nicvf_send_msg_to_pf(nic, &mbx);
 312}
 313
 314void nicvf_config_rss(struct nicvf *nic)
 315{
 316        union nic_mbx mbx = {};
 317        struct nicvf_rss_info *rss = &nic->rss_info;
 318        int ind_tbl_len = rss->rss_size;
 319        int i, nextq = 0;
 320
 321        mbx.rss_cfg.vf_id = nic->vf_id;
 322        mbx.rss_cfg.hash_bits = rss->hash_bits;
 323        while (ind_tbl_len) {
 324                mbx.rss_cfg.tbl_offset = nextq;
 325                mbx.rss_cfg.tbl_len = min(ind_tbl_len,
 326                                               RSS_IND_TBL_LEN_PER_MBX_MSG);
 327                mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
 328                          NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
 329
 330                for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
 331                        mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
 332
 333                nicvf_send_msg_to_pf(nic, &mbx);
 334
 335                ind_tbl_len -= mbx.rss_cfg.tbl_len;
 336        }
 337}
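
/* Worked example (illustrative): for an indirection table of, say, 128
 * entries and RSS_IND_TBL_LEN_PER_MBX_MSG assumed to be 8, the loop above
 * sends 128 / 8 = 16 mailbox messages; the first one (tbl_offset == 0) is
 * tagged NIC_MBOX_MSG_RSS_CFG and the remaining 15 NIC_MBOX_MSG_RSS_CFG_CONT.
 */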
 338
 339void nicvf_set_rss_key(struct nicvf *nic)
 340{
 341        struct nicvf_rss_info *rss = &nic->rss_info;
 342        u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
 343        int idx;
 344
 345        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
 346                nicvf_reg_write(nic, key_addr, rss->key[idx]);
 347                key_addr += sizeof(u64);
 348        }
 349}
 350
 351static int nicvf_rss_init(struct nicvf *nic)
 352{
 353        struct nicvf_rss_info *rss = &nic->rss_info;
 354        int idx;
 355
 356        nicvf_get_rss_size(nic);
 357
 358        if (cpi_alg != CPI_ALG_NONE) {
 359                rss->enable = false;
 360                rss->hash_bits = 0;
 361                return 0;
 362        }
 363
 364        rss->enable = true;
 365
 366        netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
 367        nicvf_set_rss_key(nic);
 368
 369        rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
 370        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
 371
 372        rss->hash_bits =  ilog2(rounddown_pow_of_two(rss->rss_size));
 373
 374        for (idx = 0; idx < rss->rss_size; idx++)
 375                rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
 376                                                               nic->rx_queues);
 377        nicvf_config_rss(nic);
 378        return 1;
 379}
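
/* Worked example (illustrative): with rss_size = 128 the code above yields
 * hash_bits = ilog2(rounddown_pow_of_two(128)) = 7, and with
 * nic->rx_queues = 4 ethtool_rxfh_indir_default() spreads the table
 * round-robin, i.e. ind_tbl[] = 0, 1, 2, 3, 0, 1, 2, 3, ...
 */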
 380
 381/* Request PF to allocate additional Qsets */
 382static void nicvf_request_sqs(struct nicvf *nic)
 383{
 384        union nic_mbx mbx = {};
 385        int sqs;
 386        int sqs_count = nic->sqs_count;
 387        int rx_queues = 0, tx_queues = 0;
 388
 389        /* Only primary VF should request */
 390        if (nic->sqs_mode ||  !nic->sqs_count)
 391                return;
 392
 393        mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
 394        mbx.sqs_alloc.vf_id = nic->vf_id;
 395        mbx.sqs_alloc.qs_count = nic->sqs_count;
 396        if (nicvf_send_msg_to_pf(nic, &mbx)) {
 397                /* No response from PF */
 398                nic->sqs_count = 0;
 399                return;
 400        }
 401
 402        /* Return if no Secondary Qsets available */
 403        if (!nic->sqs_count)
 404                return;
 405
 406        if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
 407                rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
 408
 409        tx_queues = nic->tx_queues + nic->xdp_tx_queues;
 410        if (tx_queues > MAX_SND_QUEUES_PER_QS)
 411                tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
 412
 413        /* Set no of Rx/Tx queues in each of the SQsets */
 414        for (sqs = 0; sqs < nic->sqs_count; sqs++) {
 415                mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
 416                mbx.nicvf.vf_id = nic->vf_id;
 417                mbx.nicvf.sqs_id = sqs;
 418                nicvf_send_msg_to_pf(nic, &mbx);
 419
 420                nic->snicvf[sqs]->sqs_id = sqs;
 421                if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
 422                        nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
 423                        rx_queues -= MAX_RCV_QUEUES_PER_QS;
 424                } else {
 425                        nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
 426                        rx_queues = 0;
 427                }
 428
 429                if (tx_queues > MAX_SND_QUEUES_PER_QS) {
 430                        nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
 431                        tx_queues -= MAX_SND_QUEUES_PER_QS;
 432                } else {
 433                        nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
 434                        tx_queues = 0;
 435                }
 436
 437                nic->snicvf[sqs]->qs->cq_cnt =
 438                max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
 439
 440                /* Initialize secondary Qset's queues and its interrupts */
 441                nicvf_open(nic->snicvf[sqs]->netdev);
 442        }
 443
 444        /* Update stack with actual Rx/Tx queue count allocated */
 445        if (sqs_count != nic->sqs_count)
 446                nicvf_set_real_num_queues(nic->netdev,
 447                                          nic->tx_queues, nic->rx_queues);
 448}
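
/* Distribution example (illustrative): assuming MAX_RCV_QUEUES_PER_QS and
 * MAX_SND_QUEUES_PER_QS are both 8, a request for 12 Rx and 12 Tx queues
 * (no XDP) leaves rx_queues = tx_queues = 4 after the primary Qset, so a
 * single secondary Qset granted by the PF ends up with rq_cnt = sq_cnt = 4
 * and cq_cnt = max(4, 4) = 4.
 */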
 449
 450/* Send this Qset's nicvf pointer to PF.
 451 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 452 * so that packets received by these Qsets can use primary VF's netdev.
 453 */
 454static void nicvf_send_vf_struct(struct nicvf *nic)
 455{
 456        union nic_mbx mbx = {};
 457
 458        mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
 459        mbx.nicvf.sqs_mode = nic->sqs_mode;
 460        mbx.nicvf.nicvf = (u64)nic;
 461        nicvf_send_msg_to_pf(nic, &mbx);
 462}
 463
 464static void nicvf_get_primary_vf_struct(struct nicvf *nic)
 465{
 466        union nic_mbx mbx = {};
 467
 468        mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
 469        nicvf_send_msg_to_pf(nic, &mbx);
 470}
 471
 472int nicvf_set_real_num_queues(struct net_device *netdev,
 473                              int tx_queues, int rx_queues)
 474{
 475        int err = 0;
 476
 477        err = netif_set_real_num_tx_queues(netdev, tx_queues);
 478        if (err) {
 479                netdev_err(netdev,
 480                           "Failed to set no of Tx queues: %d\n", tx_queues);
 481                return err;
 482        }
 483
 484        err = netif_set_real_num_rx_queues(netdev, rx_queues);
 485        if (err)
 486                netdev_err(netdev,
 487                           "Failed to set no of Rx queues: %d\n", rx_queues);
 488        return err;
 489}
 490
 491static int nicvf_init_resources(struct nicvf *nic)
 492{
 493        int err;
 494
 495        /* Enable Qset */
 496        nicvf_qset_config(nic, true);
 497
 498        /* Initialize queues and HW for data transfer */
 499        err = nicvf_config_data_transfer(nic, true);
 500        if (err) {
 501                netdev_err(nic->netdev,
 502                           "Failed to alloc/config VF's QSet resources\n");
 503                return err;
 504        }
 505
 506        return 0;
 507}
 508
 509static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 510                                struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
 511                                struct rcv_queue *rq, struct sk_buff **skb)
 512{
 513        struct xdp_buff xdp;
 514        struct page *page;
 515        u32 action;
 516        u16 len, offset = 0;
 517        u64 dma_addr, cpu_addr;
 518        void *orig_data;
 519
 520        /* Retrieve packet buffer's DMA address and length */
 521        len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
 522        dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));
 523
 524        cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
 525        if (!cpu_addr)
 526                return false;
 527        cpu_addr = (u64)phys_to_virt(cpu_addr);
 528        page = virt_to_page((void *)cpu_addr);
 529
 530        xdp.data_hard_start = page_address(page);
 531        xdp.data = (void *)cpu_addr;
 532        xdp_set_data_meta_invalid(&xdp);
 533        xdp.data_end = xdp.data + len;
 534        xdp.rxq = &rq->xdp_rxq;
 535        orig_data = xdp.data;
 536
 537        rcu_read_lock();
 538        action = bpf_prog_run_xdp(prog, &xdp);
 539        rcu_read_unlock();
 540
 541        /* Check if XDP program has changed headers */
 542        if (orig_data != xdp.data) {
 543                len = xdp.data_end - xdp.data;
 544                offset = orig_data - xdp.data;
 545                dma_addr -= offset;
 546        }
 547
 548        switch (action) {
 549        case XDP_PASS:
 550                /* Check if it's a recycled page, if not
 551                 * unmap the DMA mapping.
 552                 *
 553                 * Recycled page holds an extra reference.
 554                 */
 555                if (page_ref_count(page) == 1) {
 556                        dma_addr &= PAGE_MASK;
 557                        dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
 558                                             RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
 559                                             DMA_FROM_DEVICE,
 560                                             DMA_ATTR_SKIP_CPU_SYNC);
 561                }
 562
 563                /* Build SKB and pass on packet to network stack */
 564                *skb = build_skb(xdp.data,
 565                                 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
 566                if (!*skb)
 567                        put_page(page);
 568                else
 569                        skb_put(*skb, len);
 570                return false;
 571        case XDP_TX:
 572                nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
 573                return true;
 574        default:
 575                bpf_warn_invalid_xdp_action(action);
 576                /* fall through */
 577        case XDP_ABORTED:
 578                trace_xdp_exception(nic->netdev, prog, action);
 579                /* fall through */
 580        case XDP_DROP:
 581                /* Check if it's a recycled page, if not
 582                 * unmap the DMA mapping.
 583                 *
 584                 * Recycled page holds an extra reference.
 585                 */
 586                if (page_ref_count(page) == 1) {
 587                        dma_addr &= PAGE_MASK;
 588                        dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
 589                                             RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
 590                                             DMA_FROM_DEVICE,
 591                                             DMA_ATTR_SKIP_CPU_SYNC);
 592                }
 593                put_page(page);
 594                return true;
 595        }
 596        return false;
 597}
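
/* Illustrative XDP program (not part of this driver) of the kind run by
 * bpf_prog_run_xdp() above; returning XDP_TX makes the handler queue the
 * frame on an XDP Tx queue, while XDP_PASS builds an skb for the stack:
 *
 *        SEC("xdp")
 *        int xdp_reflector(struct xdp_md *ctx)
 *        {
 *                return XDP_TX;
 *        }
 */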
 598
 599static void nicvf_snd_ptp_handler(struct net_device *netdev,
 600                                  struct cqe_send_t *cqe_tx)
 601{
 602        struct nicvf *nic = netdev_priv(netdev);
 603        struct skb_shared_hwtstamps ts;
 604        u64 ns;
 605
 606        nic = nic->pnicvf;
 607
 608        /* Sync for 'ptp_skb' */
 609        smp_rmb();
 610
 611        /* New timestamp request can be queued now */
 612        atomic_set(&nic->tx_ptp_skbs, 0);
 613
 614        /* Check whether an skb is waiting for a Tx timestamp */
 615        if (!nic->ptp_skb)
 616                return;
 617
 618        /* Check if timestamping timed out; the timeout is set to 10us */
 619        if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
 620            cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
 621                goto no_tstamp;
 622
 623        /* Get the timestamp */
 624        memset(&ts, 0, sizeof(ts));
 625        ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
 626        ts.hwtstamp = ns_to_ktime(ns);
 627        skb_tstamp_tx(nic->ptp_skb, &ts);
 628
 629no_tstamp:
 630        /* Free the original skb */
 631        dev_kfree_skb_any(nic->ptp_skb);
 632        nic->ptp_skb = NULL;
 633        /* Sync 'ptp_skb' */
 634        smp_wmb();
 635}
 636
 637static void nicvf_snd_pkt_handler(struct net_device *netdev,
 638                                  struct cqe_send_t *cqe_tx,
 639                                  int budget, int *subdesc_cnt,
 640                                  unsigned int *tx_pkts, unsigned int *tx_bytes)
 641{
 642        struct sk_buff *skb = NULL;
 643        struct page *page;
 644        struct nicvf *nic = netdev_priv(netdev);
 645        struct snd_queue *sq;
 646        struct sq_hdr_subdesc *hdr;
 647        struct sq_hdr_subdesc *tso_sqe;
 648
 649        sq = &nic->qs->sq[cqe_tx->sq_idx];
 650
 651        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
 652        if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
 653                return;
 654
 655        /* Check for errors */
 656        if (cqe_tx->send_status)
 657                nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
 658
 659        /* Is this an XDP designated Tx queue? */
 660        if (sq->is_xdp) {
 661                page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
 662                /* Check if it's a recycled page, else unmap the DMA mapping */
 663                if (page && (page_ref_count(page) == 1))
 664                        nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
 665                                                 hdr->subdesc_cnt);
 666
 667                /* Release page reference for recycling */
 668                if (page)
 669                        put_page(page);
 670                sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
 671                *subdesc_cnt += hdr->subdesc_cnt + 1;
 672                return;
 673        }
 674
 675        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
 676        if (skb) {
 677                /* Check for dummy descriptor used for HW TSO offload on 88xx */
 678                if (hdr->dont_send) {
 679                        /* Get actual TSO descriptors and free them */
 680                        tso_sqe =
 681                         (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
 682                        nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
 683                                                 tso_sqe->subdesc_cnt);
 684                        *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
 685                } else {
 686                        nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
 687                                                 hdr->subdesc_cnt);
 688                }
 689                *subdesc_cnt += hdr->subdesc_cnt + 1;
 690                prefetch(skb);
 691                (*tx_pkts)++;
 692                *tx_bytes += skb->len;
 693                /* If timestamp is requested for this skb, don't free it */
 694                if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
 695                    !nic->pnicvf->ptp_skb)
 696                        nic->pnicvf->ptp_skb = skb;
 697                else
 698                        napi_consume_skb(skb, budget);
 699                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 700        } else {
 701                /* In case of SW TSO on 88xx, only the last segment will have
 702                 * an SKB attached, so just free SQEs here.
 703                 */
 704                if (!nic->hw_tso)
 705                        *subdesc_cnt += hdr->subdesc_cnt + 1;
 706        }
 707}
 708
 709static inline void nicvf_set_rxhash(struct net_device *netdev,
 710                                    struct cqe_rx_t *cqe_rx,
 711                                    struct sk_buff *skb)
 712{
 713        u8 hash_type;
 714        u32 hash;
 715
 716        if (!(netdev->features & NETIF_F_RXHASH))
 717                return;
 718
 719        switch (cqe_rx->rss_alg) {
 720        case RSS_ALG_TCP_IP:
 721        case RSS_ALG_UDP_IP:
 722                hash_type = PKT_HASH_TYPE_L4;
 723                hash = cqe_rx->rss_tag;
 724                break;
 725        case RSS_ALG_IP:
 726                hash_type = PKT_HASH_TYPE_L3;
 727                hash = cqe_rx->rss_tag;
 728                break;
 729        default:
 730                hash_type = PKT_HASH_TYPE_NONE;
 731                hash = 0;
 732        }
 733
 734        skb_set_hash(skb, hash, hash_type);
 735}
 736
 737static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
 738{
 739        u64 ns;
 740
 741        if (!nic->ptp_clock || !nic->hw_rx_tstamp)
 742                return;
 743
 744        /* The first 8 bytes are the timestamp */
 745        ns = cavium_ptp_tstamp2time(nic->ptp_clock,
 746                                    be64_to_cpu(*(__be64 *)skb->data));
 747        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
 748
 749        __skb_pull(skb, 8);
 750}
 751
 752static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 753                                  struct napi_struct *napi,
 754                                  struct cqe_rx_t *cqe_rx,
 755                                  struct snd_queue *sq, struct rcv_queue *rq)
 756{
 757        struct sk_buff *skb = NULL;
 758        struct nicvf *nic = netdev_priv(netdev);
 759        struct nicvf *snic = nic;
 760        int err = 0;
 761        int rq_idx;
 762
 763        rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
 764
 765        if (nic->sqs_mode) {
 766                /* Use primary VF's 'nicvf' struct */
 767                nic = nic->pnicvf;
 768                netdev = nic->netdev;
 769        }
 770
 771        /* Check for errors */
 772        if (cqe_rx->err_level || cqe_rx->err_opcode) {
 773                err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
 774                if (err && !cqe_rx->rb_cnt)
 775                        return;
 776        }
 777
 778        /* For XDP, ignore pkts spanning multiple pages */
 779        if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
 780                /* Packet consumed by XDP */
 781                if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
 782                        return;
 783        } else {
 784                skb = nicvf_get_rcv_skb(snic, cqe_rx,
 785                                        nic->xdp_prog ? true : false);
 786        }
 787
 788        if (!skb)
 789                return;
 790
 791        if (netif_msg_pktdata(nic)) {
 792                netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
 793                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
 794                               skb->data, skb->len, true);
 795        }
 796
 797        /* If error packet, drop it here */
 798        if (err) {
 799                dev_kfree_skb_any(skb);
 800                return;
 801        }
 802
 803        nicvf_set_rxtstamp(nic, skb);
 804        nicvf_set_rxhash(netdev, cqe_rx, skb);
 805
 806        skb_record_rx_queue(skb, rq_idx);
 807        if (netdev->hw_features & NETIF_F_RXCSUM) {
 808                /* HW by default verifies TCP/UDP/SCTP checksums */
 809                skb->ip_summed = CHECKSUM_UNNECESSARY;
 810        } else {
 811                skb_checksum_none_assert(skb);
 812        }
 813
 814        skb->protocol = eth_type_trans(skb, netdev);
 815
 816        /* Check for stripped VLAN */
 817        if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
 818                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 819                                       ntohs((__force __be16)cqe_rx->vlan_tci));
 820
 821        if (napi && (netdev->features & NETIF_F_GRO))
 822                napi_gro_receive(napi, skb);
 823        else
 824                netif_receive_skb(skb);
 825}
 826
 827static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 828                                 struct napi_struct *napi, int budget)
 829{
 830        int processed_cqe, work_done = 0, tx_done = 0;
 831        int cqe_count, cqe_head;
 832        int subdesc_cnt = 0;
 833        struct nicvf *nic = netdev_priv(netdev);
 834        struct queue_set *qs = nic->qs;
 835        struct cmp_queue *cq = &qs->cq[cq_idx];
 836        struct cqe_rx_t *cq_desc;
 837        struct netdev_queue *txq;
 838        struct snd_queue *sq = &qs->sq[cq_idx];
 839        struct rcv_queue *rq = &qs->rq[cq_idx];
 840        unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
 841
 842        spin_lock_bh(&cq->lock);
 843loop:
 844        processed_cqe = 0;
 845        /* Get no of valid CQ entries to process */
 846        cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
 847        cqe_count &= CQ_CQE_COUNT;
 848        if (!cqe_count)
 849                goto done;
 850
 851        /* Get head of the valid CQ entries */
 852        cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
 853        cqe_head &= 0xFFFF;
 854
 855        while (processed_cqe < cqe_count) {
 856                /* Get the CQ descriptor */
 857                cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
 858                cqe_head++;
 859                cqe_head &= (cq->dmem.q_len - 1);
 860                /* Initiate prefetch for next descriptor */
 861                prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
 862
 863                if ((work_done >= budget) && napi &&
 864                    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
 865                        break;
 866                }
 867
 868                switch (cq_desc->cqe_type) {
 869                case CQE_TYPE_RX:
 870                        nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
 871                        work_done++;
 872                break;
 873                case CQE_TYPE_SEND:
 874                        nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
 875                                              budget, &subdesc_cnt,
 876                                              &tx_pkts, &tx_bytes);
 877                        tx_done++;
 878                break;
 879                case CQE_TYPE_SEND_PTP:
 880                        nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
 881                break;
 882                case CQE_TYPE_INVALID:
 883                case CQE_TYPE_RX_SPLIT:
 884                case CQE_TYPE_RX_TCP:
 885                        /* Ignore for now */
 886                break;
 887                }
 888                processed_cqe++;
 889        }
 890
 891        /* Ring doorbell to inform H/W to reuse processed CQEs */
 892        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
 893                              cq_idx, processed_cqe);
 894
 895        if ((work_done < budget) && napi)
 896                goto loop;
 897
 898done:
 899        /* Update SQ's descriptor free count */
 900        if (subdesc_cnt)
 901                nicvf_put_sq_desc(sq, subdesc_cnt);
 902
 903        txq_idx = nicvf_netdev_qidx(nic, cq_idx);
 904        /* Handle XDP TX queues */
 905        if (nic->pnicvf->xdp_prog) {
 906                if (txq_idx < nic->pnicvf->xdp_tx_queues) {
 907                        nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
 908                        goto out;
 909                }
 910                nic = nic->pnicvf;
 911                txq_idx -= nic->pnicvf->xdp_tx_queues;
 912        }
 913
 914        /* Wake up TXQ if it was stopped earlier due to SQ being full */
 915        if (tx_done ||
 916            (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
 917                netdev = nic->pnicvf->netdev;
 918                txq = netdev_get_tx_queue(netdev, txq_idx);
 919                if (tx_pkts)
 920                        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
 921
 922                /* To read updated queue and carrier status */
 923                smp_mb();
 924                if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 925                        netif_tx_wake_queue(txq);
 926                        nic = nic->pnicvf;
 927                        this_cpu_inc(nic->drv_stats->txq_wake);
 928                        netif_warn(nic, tx_err, netdev,
 929                                   "Transmit queue wakeup SQ%d\n", txq_idx);
 930                }
 931        }
 932
 933out:
 934        spin_unlock_bh(&cq->lock);
 935        return work_done;
 936}
 937
 938static int nicvf_poll(struct napi_struct *napi, int budget)
 939{
 940        u64  cq_head;
 941        int  work_done = 0;
 942        struct net_device *netdev = napi->dev;
 943        struct nicvf *nic = netdev_priv(netdev);
 944        struct nicvf_cq_poll *cq;
 945
 946        cq = container_of(napi, struct nicvf_cq_poll, napi);
 947        work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
 948
 949        if (work_done < budget) {
 950                /* Slow packet rate, exit polling */
 951                napi_complete_done(napi, work_done);
 952                /* Re-enable interrupts */
 953                cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
 954                                               cq->cq_idx);
 955                nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
 956                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
 957                                      cq->cq_idx, cq_head);
 958                nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
 959        }
 960        return work_done;
 961}
 962
 963/* Qset error interrupt handler
 964 *
 965 * As of now only CQ errors are handled
 966 */
 967static void nicvf_handle_qs_err(unsigned long data)
 968{
 969        struct nicvf *nic = (struct nicvf *)data;
 970        struct queue_set *qs = nic->qs;
 971        int qidx;
 972        u64 status;
 973
 974        netif_tx_disable(nic->netdev);
 975
 976        /* Check if it is CQ err */
 977        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 978                status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
 979                                              qidx);
 980                if (!(status & CQ_ERR_MASK))
 981                        continue;
 982                /* Process already queued CQEs and reconfig CQ */
 983                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
 984                nicvf_sq_disable(nic, qidx);
 985                nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
 986                nicvf_cmp_queue_config(nic, qs, qidx, true);
 987                nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
 988                nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
 989
 990                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
 991        }
 992
 993        netif_tx_start_all_queues(nic->netdev);
 994        /* Re-enable Qset error interrupt */
 995        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
 996}
 997
 998static void nicvf_dump_intr_status(struct nicvf *nic)
 999{
1000        netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
1001                   nicvf_reg_read(nic, NIC_VF_INT));
1002}
1003
1004static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
1005{
1006        struct nicvf *nic = (struct nicvf *)nicvf_irq;
1007        u64 intr;
1008
1009        nicvf_dump_intr_status(nic);
1010
1011        intr = nicvf_reg_read(nic, NIC_VF_INT);
1012        /* Check for spurious interrupt */
1013        if (!(intr & NICVF_INTR_MBOX_MASK))
1014                return IRQ_HANDLED;
1015
1016        nicvf_handle_mbx_intr(nic);
1017
1018        return IRQ_HANDLED;
1019}
1020
1021static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
1022{
1023        struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
1024        struct nicvf *nic = cq_poll->nicvf;
1025        int qidx = cq_poll->cq_idx;
1026
1027        nicvf_dump_intr_status(nic);
1028
1029        /* Disable interrupts */
1030        nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1031
1032        /* Schedule NAPI */
1033        napi_schedule_irqoff(&cq_poll->napi);
1034
1035        /* Clear interrupt */
1036        nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1037
1038        return IRQ_HANDLED;
1039}
1040
1041static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
1042{
1043        struct nicvf *nic = (struct nicvf *)nicvf_irq;
1044        u8 qidx;
1045
1046
1047        nicvf_dump_intr_status(nic);
1048
1049        /* Disable RBDR interrupt and schedule softirq */
1050        for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
1051                if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
1052                        continue;
1053                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1054                tasklet_hi_schedule(&nic->rbdr_task);
1055                /* Clear interrupt */
1056                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1057        }
1058
1059        return IRQ_HANDLED;
1060}
1061
1062static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
1063{
1064        struct nicvf *nic = (struct nicvf *)nicvf_irq;
1065
1066        nicvf_dump_intr_status(nic);
1067
1068        /* Disable Qset err interrupt and schedule softirq */
1069        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1070        tasklet_hi_schedule(&nic->qs_err_task);
1071        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1072
1073        return IRQ_HANDLED;
1074}
1075
1076static void nicvf_set_irq_affinity(struct nicvf *nic)
1077{
1078        int vec, cpu;
1079
1080        for (vec = 0; vec < nic->num_vec; vec++) {
1081                if (!nic->irq_allocated[vec])
1082                        continue;
1083
1084                if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
1085                        return;
1086                 /* CQ interrupts */
1087                if (vec < NICVF_INTR_ID_SQ)
1088                        /* Leave CPU0 for RBDR and other interrupts */
1089                        cpu = nicvf_netdev_qidx(nic, vec) + 1;
1090                else
1091                        cpu = 0;
1092
1093                cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
1094                                nic->affinity_mask[vec]);
1095                irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
1096                                      nic->affinity_mask[vec]);
1097        }
1098}
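
/* Affinity sketch (illustrative): assuming NICVF_INTR_ID_SQ is 8, the loop
 * above spreads the eight CQ vectors of a primary Qset over CPUs 1..8
 * (picked local to nic->node by cpumask_local_spread()), while the other
 * allocated vectors (RBDR and Qset error) stay on CPU 0.
 */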
1099
1100static int nicvf_register_interrupts(struct nicvf *nic)
1101{
1102        int irq, ret = 0;
1103
1104        for_each_cq_irq(irq)
1105                sprintf(nic->irq_name[irq], "%s-rxtx-%d",
1106                        nic->pnicvf->netdev->name,
1107                        nicvf_netdev_qidx(nic, irq));
1108
1109        for_each_sq_irq(irq)
1110                sprintf(nic->irq_name[irq], "%s-sq-%d",
1111                        nic->pnicvf->netdev->name,
1112                        nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
1113
1114        for_each_rbdr_irq(irq)
1115                sprintf(nic->irq_name[irq], "%s-rbdr-%d",
1116                        nic->pnicvf->netdev->name,
1117                        nic->sqs_mode ? (nic->sqs_id + 1) : 0);
1118
1119        /* Register CQ interrupts */
1120        for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
1121                ret = request_irq(pci_irq_vector(nic->pdev, irq),
1122                                  nicvf_intr_handler,
1123                                  0, nic->irq_name[irq], nic->napi[irq]);
1124                if (ret)
1125                        goto err;
1126                nic->irq_allocated[irq] = true;
1127        }
1128
1129        /* Register RBDR interrupt */
1130        for (irq = NICVF_INTR_ID_RBDR;
1131             irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
1132                ret = request_irq(pci_irq_vector(nic->pdev, irq),
1133                                  nicvf_rbdr_intr_handler,
1134                                  0, nic->irq_name[irq], nic);
1135                if (ret)
1136                        goto err;
1137                nic->irq_allocated[irq] = true;
1138        }
1139
1140        /* Register QS error interrupt */
1141        sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
1142                nic->pnicvf->netdev->name,
1143                nic->sqs_mode ? (nic->sqs_id + 1) : 0);
1144        irq = NICVF_INTR_ID_QS_ERR;
1145        ret = request_irq(pci_irq_vector(nic->pdev, irq),
1146                          nicvf_qs_err_intr_handler,
1147                          0, nic->irq_name[irq], nic);
1148        if (ret)
1149                goto err;
1150
1151        nic->irq_allocated[irq] = true;
1152
1153        /* Set IRQ affinities */
1154        nicvf_set_irq_affinity(nic);
1155
1156err:
1157        if (ret)
1158                netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
1159
1160        return ret;
1161}
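
/* Naming sketch (illustrative): for a primary VF whose netdev happens to be
 * named "enp1s0f1" and which owns four CQs, the request_irq() calls above
 * would show up in /proc/interrupts roughly as:
 *
 *        enp1s0f1-rxtx-0 ... enp1s0f1-rxtx-3
 *        enp1s0f1-rbdr-0
 *        enp1s0f1-qset-err-0
 */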
1162
1163static void nicvf_unregister_interrupts(struct nicvf *nic)
1164{
1165        struct pci_dev *pdev = nic->pdev;
1166        int irq;
1167
1168        /* Free registered interrupts */
1169        for (irq = 0; irq < nic->num_vec; irq++) {
1170                if (!nic->irq_allocated[irq])
1171                        continue;
1172
1173                irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
1174                free_cpumask_var(nic->affinity_mask[irq]);
1175
1176                if (irq < NICVF_INTR_ID_SQ)
1177                        free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
1178                else
1179                        free_irq(pci_irq_vector(pdev, irq), nic);
1180
1181                nic->irq_allocated[irq] = false;
1182        }
1183
1184        /* Disable MSI-X */
1185        pci_free_irq_vectors(pdev);
1186        nic->num_vec = 0;
1187}
1188
1189/* Initialize MSI-X vectors and register MISC interrupt.
1190 * Send READY message to PF to check if it's alive.
1191 */
1192static int nicvf_register_misc_interrupt(struct nicvf *nic)
1193{
1194        int ret = 0;
1195        int irq = NICVF_INTR_ID_MISC;
1196
1197        /* Return if mailbox interrupt is already registered */
1198        if (nic->pdev->msix_enabled)
1199                return 0;
1200
1201        /* Enable MSI-X */
1202        nic->num_vec = pci_msix_vec_count(nic->pdev);
1203        ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
1204                                    PCI_IRQ_MSIX);
1205        if (ret < 0) {
1206                netdev_err(nic->netdev,
1207                           "Req for #%d msix vectors failed\n", nic->num_vec);
1208                return 1;
1209        }
1210
1211        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
1212        /* Register Misc interrupt */
1213        ret = request_irq(pci_irq_vector(nic->pdev, irq),
1214                          nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
1215
1216        if (ret)
1217                return ret;
1218        nic->irq_allocated[irq] = true;
1219
1220        /* Enable mailbox interrupt */
1221        nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
1222
1223        /* Check if VF is able to communicate with PF */
1224        if (!nicvf_check_pf_ready(nic)) {
1225                nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1226                nicvf_unregister_interrupts(nic);
1227                return 1;
1228        }
1229
1230        return 0;
1231}
1232
1233static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
1234{
1235        struct nicvf *nic = netdev_priv(netdev);
1236        int qid = skb_get_queue_mapping(skb);
1237        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
1238        struct nicvf *snic;
1239        struct snd_queue *sq;
1240        int tmp;
1241
1242        /* Check for minimum packet length */
1243        if (skb->len <= ETH_HLEN) {
1244                dev_kfree_skb(skb);
1245                return NETDEV_TX_OK;
1246        }
1247
1248        /* In XDP case, initial HW tx queues are used for XDP,
1249         * but stack's queue mapping starts at '0', so skip the
1250         * Tx queues attached to Rx queues for XDP.
1251         */
1252        if (nic->xdp_prog)
1253                qid += nic->xdp_tx_queues;
1254
1255        snic = nic;
1256        /* Get secondary Qset's SQ structure */
1257        if (qid >= MAX_SND_QUEUES_PER_QS) {
1258                tmp = qid / MAX_SND_QUEUES_PER_QS;
1259                snic = (struct nicvf *)nic->snicvf[tmp - 1];
1260                if (!snic) {
1261                        netdev_warn(nic->netdev,
1262                                    "Secondary Qset#%d's ptr not initialized\n",
1263                                    tmp - 1);
1264                        dev_kfree_skb(skb);
1265                        return NETDEV_TX_OK;
1266                }
1267                qid = qid % MAX_SND_QUEUES_PER_QS;
1268        }
1269
1270        sq = &snic->qs->sq[qid];
1271        if (!netif_tx_queue_stopped(txq) &&
1272            !nicvf_sq_append_skb(snic, sq, skb, qid)) {
1273                netif_tx_stop_queue(txq);
1274
1275                /* Barrier, so that stop_queue is visible to other CPUs */
1276                smp_mb();
1277
1278                /* Check again, in case another CPU freed descriptors */
1279                if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
1280                        netif_tx_wake_queue(txq);
1281                } else {
1282                        this_cpu_inc(nic->drv_stats->txq_stop);
1283                        netif_warn(nic, tx_err, netdev,
1284                                   "Transmit ring full, stopping SQ%d\n", qid);
1285                }
1286                return NETDEV_TX_BUSY;
1287        }
1288
1289        return NETDEV_TX_OK;
1290}
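
/* Mapping example (illustrative): assuming MAX_SND_QUEUES_PER_QS is 8 and
 * nic->xdp_tx_queues is 4, a packet on stack Tx queue 6 is first shifted to
 * qid 10, which selects snic = nic->snicvf[0] and SQ index 10 % 8 = 2 on
 * that secondary Qset.
 */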
1291
1292static inline void nicvf_free_cq_poll(struct nicvf *nic)
1293{
1294        struct nicvf_cq_poll *cq_poll;
1295        int qidx;
1296
1297        for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
1298                cq_poll = nic->napi[qidx];
1299                if (!cq_poll)
1300                        continue;
1301                nic->napi[qidx] = NULL;
1302                kfree(cq_poll);
1303        }
1304}
1305
1306int nicvf_stop(struct net_device *netdev)
1307{
1308        int irq, qidx;
1309        struct nicvf *nic = netdev_priv(netdev);
1310        struct queue_set *qs = nic->qs;
1311        struct nicvf_cq_poll *cq_poll = NULL;
1312        union nic_mbx mbx = {};
1313
1314        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1315        nicvf_send_msg_to_pf(nic, &mbx);
1316
1317        netif_carrier_off(netdev);
1318        netif_tx_stop_all_queues(nic->netdev);
1319        nic->link_up = false;
1320
1321        /* Tear down secondary Qsets first */
1322        if (!nic->sqs_mode) {
1323                for (qidx = 0; qidx < nic->sqs_count; qidx++) {
1324                        if (!nic->snicvf[qidx])
1325                                continue;
1326                        nicvf_stop(nic->snicvf[qidx]->netdev);
1327                        nic->snicvf[qidx] = NULL;
1328                }
1329        }
1330
1331        /* Disable RBDR & QS error interrupts */
1332        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1333                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1334                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1335        }
1336        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1337        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1338
1339        /* Wait for pending IRQ handlers to finish */
1340        for (irq = 0; irq < nic->num_vec; irq++)
1341                synchronize_irq(pci_irq_vector(nic->pdev, irq));
1342
1343        tasklet_kill(&nic->rbdr_task);
1344        tasklet_kill(&nic->qs_err_task);
1345        if (nic->rb_work_scheduled)
1346                cancel_delayed_work_sync(&nic->rbdr_work);
1347
1348        for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
1349                cq_poll = nic->napi[qidx];
1350                if (!cq_poll)
1351                        continue;
1352                napi_synchronize(&cq_poll->napi);
1353                /* CQ interrupt is re-enabled when NAPI completes,
1354                 * so disable it now
1355                 */
1356                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1357                nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1358                napi_disable(&cq_poll->napi);
1359                netif_napi_del(&cq_poll->napi);
1360        }
1361
1362        netif_tx_disable(netdev);
1363
1364        for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1365                netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1366
1367        /* Free resources */
1368        nicvf_config_data_transfer(nic, false);
1369
1370        /* Disable HW Qset */
1371        nicvf_qset_config(nic, false);
1372
1373        /* disable mailbox interrupt */
1374        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1375
1376        nicvf_unregister_interrupts(nic);
1377
1378        nicvf_free_cq_poll(nic);
1379
1380        /* Free any pending SKB saved while awaiting a Tx timestamp */
1381        if (nic->ptp_skb) {
1382                dev_kfree_skb_any(nic->ptp_skb);
1383                nic->ptp_skb = NULL;
1384        }
1385
1386        /* Clear multiqset info */
1387        nic->pnicvf = nic;
1388
1389        return 0;
1390}
1391
1392static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
1393{
1394        union nic_mbx mbx = {};
1395
1396        mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
1397        mbx.ptp.enable = enable;
1398
1399        return nicvf_send_msg_to_pf(nic, &mbx);
1400}
1401
1402static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1403{
1404        union nic_mbx mbx = {};
1405
1406        mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1407        mbx.frs.max_frs = mtu;
1408        mbx.frs.vf_id = nic->vf_id;
1409
1410        return nicvf_send_msg_to_pf(nic, &mbx);
1411}
1412
1413int nicvf_open(struct net_device *netdev)
1414{
1415        int cpu, err, qidx;
1416        struct nicvf *nic = netdev_priv(netdev);
1417        struct queue_set *qs = nic->qs;
1418        struct nicvf_cq_poll *cq_poll = NULL;
1419        union nic_mbx mbx = {};
1420
1421        netif_carrier_off(netdev);
1422
1423        err = nicvf_register_misc_interrupt(nic);
1424        if (err)
1425                return err;
1426
1427        /* Register NAPI handler for processing CQEs */
1428        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1429                cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
1430                if (!cq_poll) {
1431                        err = -ENOMEM;
1432                        goto napi_del;
1433                }
1434                cq_poll->cq_idx = qidx;
1435                cq_poll->nicvf = nic;
1436                netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
1437                               NAPI_POLL_WEIGHT);
1438                napi_enable(&cq_poll->napi);
1439                nic->napi[qidx] = cq_poll;
1440        }
1441
1442        /* Check if we got a MAC address from PF, else generate a random MAC */
1443        if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
1444                eth_hw_addr_random(netdev);
1445                nicvf_hw_set_mac_addr(nic, netdev);
1446        }
1447
1448        if (nic->set_mac_pending) {
1449                nic->set_mac_pending = false;
1450                nicvf_hw_set_mac_addr(nic, netdev);
1451        }
1452
1453        /* Init tasklet for handling Qset err interrupt */
1454        tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
1455                     (unsigned long)nic);
1456
1457        /* Init RBDR tasklet which will refill RBDR */
1458        tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
1459                     (unsigned long)nic);
1460        INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
1461
1462        /* Configure CPI algorithm */
1463        nic->cpi_alg = cpi_alg;
1464        if (!nic->sqs_mode)
1465                nicvf_config_cpi(nic);
1466
1467        nicvf_request_sqs(nic);
1468        if (nic->sqs_mode)
1469                nicvf_get_primary_vf_struct(nic);
1470
1471        /* Configure PTP timestamp */
1472        if (nic->ptp_clock)
1473                nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
1474        atomic_set(&nic->tx_ptp_skbs, 0);
1475        nic->ptp_skb = NULL;
1476
1477        /* Configure receive side scaling and MTU */
1478        if (!nic->sqs_mode) {
1479                nicvf_rss_init(nic);
1480                err = nicvf_update_hw_max_frs(nic, netdev->mtu);
1481                if (err)
1482                        goto cleanup;
1483
1484                /* Clear percpu stats */
1485                for_each_possible_cpu(cpu)
1486                        memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
1487                               sizeof(struct nicvf_drv_stats));
1488        }
1489
1490        err = nicvf_register_interrupts(nic);
1491        if (err)
1492                goto cleanup;
1493
1494        /* Initialize the queues */
1495        err = nicvf_init_resources(nic);
1496        if (err)
1497                goto cleanup;
1498
1499        /* Make sure queue initialization is written */
1500        wmb();
1501
1502        nicvf_reg_write(nic, NIC_VF_INT, -1);
1503        /* Enable Qset err interrupt */
1504        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
1505
1506        /* Enable completion queue interrupt */
1507        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1508                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
1509
1510        /* Enable RBDR threshold interrupt */
1511        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1512                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1513
1514        /* Send VF config done msg to PF */
1515        mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
1516        nicvf_write_to_mbx(nic, &mbx);
1517
1518        return 0;
1519cleanup:
1520        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1521        nicvf_unregister_interrupts(nic);
1522        tasklet_kill(&nic->qs_err_task);
1523        tasklet_kill(&nic->rbdr_task);
1524napi_del:
1525        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1526                cq_poll = nic->napi[qidx];
1527                if (!cq_poll)
1528                        continue;
1529                napi_disable(&cq_poll->napi);
1530                netif_napi_del(&cq_poll->napi);
1531        }
1532        nicvf_free_cq_poll(nic);
1533        return err;
1534}
1535
1536static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1537{
1538        struct nicvf *nic = netdev_priv(netdev);
1539        int orig_mtu = netdev->mtu;
1540
1541        netdev->mtu = new_mtu;
1542
1543        if (!netif_running(netdev))
1544                return 0;
1545
1546        if (nicvf_update_hw_max_frs(nic, new_mtu)) {
1547                netdev->mtu = orig_mtu;
1548                return -EINVAL;
1549        }
1550
1551        return 0;
1552}
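
/* Illustrative userspace sketch (not part of the driver, guarded out so it
 * is never built): changing the MTU via the standard SIOCSIFMTU ioctl, which
 * reaches nicvf_change_mtu() above through dev_set_mtu().  The interface
 * name is a placeholder.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int example_set_mtu(const char *ifname, int mtu)
{
        struct ifreq ifr;
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_mtu = mtu;
        ret = ioctl(fd, SIOCSIFMTU, &ifr);
        close(fd);
        return ret;
}
#endif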
1553
1554static int nicvf_set_mac_address(struct net_device *netdev, void *p)
1555{
1556        struct sockaddr *addr = p;
1557        struct nicvf *nic = netdev_priv(netdev);
1558
1559        if (!is_valid_ether_addr(addr->sa_data))
1560                return -EADDRNOTAVAIL;
1561
1562        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1563
1564        if (nic->pdev->msix_enabled) {
1565                if (nicvf_hw_set_mac_addr(nic, netdev))
1566                        return -EBUSY;
1567        } else {
1568                nic->set_mac_pending = true;
1569        }
1570
1571        return 0;
1572}
1573
1574void nicvf_update_lmac_stats(struct nicvf *nic)
1575{
1576        int stat = 0;
1577        union nic_mbx mbx = {};
1578
1579        if (!netif_running(nic->netdev))
1580                return;
1581
1582        mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
1583        mbx.bgx_stats.vf_id = nic->vf_id;
1584        /* Rx stats */
1585        mbx.bgx_stats.rx = 1;
1586        while (stat < BGX_RX_STATS_COUNT) {
1587                mbx.bgx_stats.idx = stat;
1588                if (nicvf_send_msg_to_pf(nic, &mbx))
1589                        return;
1590                stat++;
1591        }
1592
1593        stat = 0;
1594
1595        /* Tx stats */
1596        mbx.bgx_stats.rx = 0;
1597        while (stat < BGX_TX_STATS_COUNT) {
1598                mbx.bgx_stats.idx = stat;
1599                if (nicvf_send_msg_to_pf(nic, &mbx))
1600                        return;
1601                stat++;
1602        }
1603}
1604
1605void nicvf_update_stats(struct nicvf *nic)
1606{
1607        int qidx, cpu;
1608        u64 tmp_stats = 0;
1609        struct nicvf_hw_stats *stats = &nic->hw_stats;
1610        struct nicvf_drv_stats *drv_stats;
1611        struct queue_set *qs = nic->qs;
1612
1613#define GET_RX_STATS(reg) \
1614        nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
1615#define GET_TX_STATS(reg) \
1616        nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
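/* The per-VNIC hardware statistics are 64-bit counters laid out back to
 * back, so '(reg << 3)' (i.e. reg * 8 bytes) selects the reg-th counter
 * within the RX/TX statistics block.
 */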
1617
1618        stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1619        stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1620        stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1621        stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1622        stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1623        stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1624        stats->rx_drop_red = GET_RX_STATS(RX_RED);
1625        stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1626        stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1627        stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1628        stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1629        stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1630        stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1631        stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1632
1633        stats->tx_bytes = GET_TX_STATS(TX_OCTS);
1634        stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
1635        stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
1636        stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
1637        stats->tx_drops = GET_TX_STATS(TX_DROP);
1638
1639        /* On T88 pass 2.0, the dummy SQE added for TSO notification
1640         * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
1641         * to by the dummy SQE, which results in the tx_drops counter
1642         * being incremented. Subtracting it from the tx_tso counter
1643         * gives the exact tx_drops count.
1644         */
1645        if (nic->t88 && nic->hw_tso) {
1646                for_each_possible_cpu(cpu) {
1647                        drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
1648                        tmp_stats += drv_stats->tx_tso;
1649                }
1650                stats->tx_drops = tmp_stats - stats->tx_drops;
1651        }
1652        stats->tx_frames = stats->tx_ucast_frames +
1653                           stats->tx_bcast_frames +
1654                           stats->tx_mcast_frames;
1655        stats->rx_frames = stats->rx_ucast_frames +
1656                           stats->rx_bcast_frames +
1657                           stats->rx_mcast_frames;
1658        stats->rx_drops = stats->rx_drop_red +
1659                          stats->rx_drop_overrun;
1660
1661        /* Update RQ and SQ stats */
1662        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1663                nicvf_update_rq_stats(nic, qidx);
1664        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1665                nicvf_update_sq_stats(nic, qidx);
1666}
1667
1668static void nicvf_get_stats64(struct net_device *netdev,
1669                              struct rtnl_link_stats64 *stats)
1670{
1671        struct nicvf *nic = netdev_priv(netdev);
1672        struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
1673
1674        nicvf_update_stats(nic);
1675
1676        stats->rx_bytes = hw_stats->rx_bytes;
1677        stats->rx_packets = hw_stats->rx_frames;
1678        stats->rx_dropped = hw_stats->rx_drops;
1679        stats->multicast = hw_stats->rx_mcast_frames;
1680
1681        stats->tx_bytes = hw_stats->tx_bytes;
1682        stats->tx_packets = hw_stats->tx_frames;
1683        stats->tx_dropped = hw_stats->tx_drops;
1684
1685}
1686
1687static void nicvf_tx_timeout(struct net_device *dev)
1688{
1689        struct nicvf *nic = netdev_priv(dev);
1690
1691        netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");
1692
1693        this_cpu_inc(nic->drv_stats->tx_timeout);
1694        schedule_work(&nic->reset_task);
1695}
1696
1697static void nicvf_reset_task(struct work_struct *work)
1698{
1699        struct nicvf *nic;
1700
1701        nic = container_of(work, struct nicvf, reset_task);
1702
1703        if (!netif_running(nic->netdev))
1704                return;
1705
1706        nicvf_stop(nic->netdev);
1707        nicvf_open(nic->netdev);
1708        netif_trans_update(nic->netdev);
1709}
1710
1711static int nicvf_config_loopback(struct nicvf *nic,
1712                                 netdev_features_t features)
1713{
1714        union nic_mbx mbx = {};
1715
1716        mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
1717        mbx.lbk.vf_id = nic->vf_id;
1718        mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
1719
1720        return nicvf_send_msg_to_pf(nic, &mbx);
1721}
1722
1723static netdev_features_t nicvf_fix_features(struct net_device *netdev,
1724                                            netdev_features_t features)
1725{
1726        struct nicvf *nic = netdev_priv(netdev);
1727
1728        if ((features & NETIF_F_LOOPBACK) &&
1729            netif_running(netdev) && !nic->loopback_supported)
1730                features &= ~NETIF_F_LOOPBACK;
1731
1732        return features;
1733}
1734
1735static int nicvf_set_features(struct net_device *netdev,
1736                              netdev_features_t features)
1737{
1738        struct nicvf *nic = netdev_priv(netdev);
1739        netdev_features_t changed = features ^ netdev->features;
1740
1741        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1742                nicvf_config_vlan_stripping(nic, features);
1743
1744        if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1745                return nicvf_config_loopback(nic, features);
1746
1747        return 0;
1748}
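
/* Note: NETIF_F_LOOPBACK is advertised in hw_features (see nicvf_probe()),
 * so internal loopback can typically be toggled from userspace with e.g.
 * "ethtool -K <iface> loopback on"; nicvf_fix_features() above drops the
 * request when the interface is running but loopback is not supported.
 */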
1749
1750static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
1751{
1752        u8 cq_count, txq_count;
1753
1754        /* Set XDP Tx queue count same as Rx queue count */
1755        if (!bpf_attached)
1756                nic->xdp_tx_queues = 0;
1757        else
1758                nic->xdp_tx_queues = nic->rx_queues;
1759
1760        /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
1761         * need to be allocated; check how many.
1762         */
1763        txq_count = nic->xdp_tx_queues + nic->tx_queues;
1764        cq_count = max(nic->rx_queues, txq_count);
1765        if (cq_count > MAX_CMP_QUEUES_PER_QS) {
1766                nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
1767                nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
1768        } else {
1769                nic->sqs_count = 0;
1770        }
1771
1772        /* Set primary Qset's resources */
1773        nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
1774        nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
1775        nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
1776
1777        /* Update stack */
1778        nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
1779}
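
/* Worked example of the secondary-Qset arithmetic above (illustrative only,
 * guarded out so it is never built), assuming MAX_CMP_QUEUES_PER_QS is 8:
 * with 12 Rx and 12 Tx queues and XDP attached, txq_count = 24 and
 * cq_count = 24, so roundup(24, 8) / 8 - 1 = 2 secondary Qsets are needed.
 */
#if 0
static unsigned int example_extra_qsets(unsigned int rx_queues,
                                        unsigned int tx_queues,
                                        bool xdp_attached)
{
        unsigned int cqs_per_qs = 8;    /* assumed MAX_CMP_QUEUES_PER_QS */
        unsigned int xdp_txqs = xdp_attached ? rx_queues : 0;
        unsigned int txq_count = xdp_txqs + tx_queues;
        unsigned int cq_count = max(rx_queues, txq_count);

        if (cq_count <= cqs_per_qs)
                return 0;       /* the primary Qset is enough */

        /* same result as roundup(cq_count, cqs_per_qs) / cqs_per_qs - 1 */
        return DIV_ROUND_UP(cq_count, cqs_per_qs) - 1;
}
#endif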
1780
1781static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1782{
1783        struct net_device *dev = nic->netdev;
1784        bool if_up = netif_running(nic->netdev);
1785        struct bpf_prog *old_prog;
1786        bool bpf_attached = false;
1787
1788        /* For now support only standard MTU-sized frames */
1789        if (prog && (dev->mtu > 1500)) {
1790                netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1791                            dev->mtu);
1792                return -EOPNOTSUPP;
1793        }
1794
1795        /* All SQs attached to CQs (i.e. as many as there are RQs) are
1796         * treated as XDP Tx queues, and additional Tx queues are
1797         * allocated for the network stack to send packets out.
1798         *
1799         * The number of Tx queues is either the same as the number of
1800         * Rx queues or whatever is left of the maximum queue count.
1801         */
1802        if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
1803                netdev_warn(dev,
1804                            "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
1805                            nic->max_queues);
1806                return -ENOMEM;
1807        }
1808
1809        if (if_up)
1810                nicvf_stop(nic->netdev);
1811
1812        old_prog = xchg(&nic->xdp_prog, prog);
1813        /* Detach old prog, if any */
1814        if (old_prog)
1815                bpf_prog_put(old_prog);
1816
1817        if (nic->xdp_prog) {
1818                /* Attach BPF program */
1819                nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1820                if (!IS_ERR(nic->xdp_prog))
1821                        bpf_attached = true;
1822        }
1823
1824        /* Calculate Tx queues needed for XDP and network stack */
1825        nicvf_set_xdp_queues(nic, bpf_attached);
1826
1827        if (if_up) {
1828                /* Reinitialize interface, clean slate */
1829                nicvf_open(nic->netdev);
1830                netif_trans_update(nic->netdev);
1831        }
1832
1833        return 0;
1834}
1835
1836static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
1837{
1838        struct nicvf *nic = netdev_priv(netdev);
1839
1840        /* To avoid checks while retrieving buffer address from CQE_RX,
1841         * do not support XDP for T88 pass1.x silicons, which are not
1842         * widely used anyway.
1843         */
1844        if (pass1_silicon(nic->pdev))
1845                return -EOPNOTSUPP;
1846
1847        switch (xdp->command) {
1848        case XDP_SETUP_PROG:
1849                return nicvf_xdp_setup(nic, xdp->prog);
1850        case XDP_QUERY_PROG:
1851                xdp->prog_attached = !!nic->xdp_prog;
1852                xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
1853                return 0;
1854        default:
1855                return -EINVAL;
1856        }
1857}
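
/* Minimal sketch of an XDP program that could be attached through the
 * ndo_bpf hook above (illustrative only, guarded out here and built
 * separately with clang's BPF target, e.g. "clang -O2 -target bpf -c
 * xdp_pass.c").  Attachment is typically done from userspace, e.g.
 * "ip link set dev <iface> xdp obj xdp_pass.o sec prog".
 */
#if 0
#include <linux/bpf.h>

#ifndef __section
#define __section(NAME) __attribute__((section(NAME), used))
#endif

/* Pass every frame up to the regular network stack untouched */
__section("prog")
int xdp_pass(struct xdp_md *ctx)
{
        return XDP_PASS;
}

char _license[] __section("license") = "GPL";
#endif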
1858
1859static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1860{
1861        struct hwtstamp_config config;
1862        struct nicvf *nic = netdev_priv(netdev);
1863
1864        if (!nic->ptp_clock)
1865                return -ENODEV;
1866
1867        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1868                return -EFAULT;
1869
1870        /* reserved for future extensions */
1871        if (config.flags)
1872                return -EINVAL;
1873
1874        switch (config.tx_type) {
1875        case HWTSTAMP_TX_OFF:
1876        case HWTSTAMP_TX_ON:
1877                break;
1878        default:
1879                return -ERANGE;
1880        }
1881
1882        switch (config.rx_filter) {
1883        case HWTSTAMP_FILTER_NONE:
1884                nic->hw_rx_tstamp = false;
1885                break;
1886        case HWTSTAMP_FILTER_ALL:
1887        case HWTSTAMP_FILTER_SOME:
1888        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1889        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1890        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1891        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1892        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1893        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1894        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1895        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1896        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1897        case HWTSTAMP_FILTER_PTP_V2_EVENT:
1898        case HWTSTAMP_FILTER_PTP_V2_SYNC:
1899        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1900                nic->hw_rx_tstamp = true;
1901                config.rx_filter = HWTSTAMP_FILTER_ALL;
1902                break;
1903        default:
1904                return -ERANGE;
1905        }
1906
1907        if (netif_running(netdev))
1908                nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
1909
1910        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1911                return -EFAULT;
1912
1913        return 0;
1914}
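
/* Illustrative userspace sketch (not part of the driver, never built):
 * enabling hardware RX timestamping through the SIOCSHWTSTAMP path handled
 * above.  The interface name is a placeholder.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_rx_tstamp(const char *ifname)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_OFF,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;
        ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
        close(fd);
        return ret;
}
#endif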
1915
1916static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1917{
1918        switch (cmd) {
1919        case SIOCSHWTSTAMP:
1920                return nicvf_config_hwtstamp(netdev, req);
1921        default:
1922                return -EOPNOTSUPP;
1923        }
1924}
1925
1926static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
1927{
1928        struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
1929                                                  work.work);
1930        struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
1931        union nic_mbx mbx = {};
1932        int idx;
1933
1934        if (!vf_work)
1935                return;
1936
1937        /* From inside the VM only 128 bits of mailbox memory are
1938         * available per message to the host's PF, so send all mc addrs
1939         * one by one, starting with a flush command in case the kernel
1940         * requests specific MAC filtering.
1941         */
1942
1943        /* flush DMAC filters and reset RX mode */
1944        mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1945        nicvf_send_msg_to_pf(nic, &mbx);
1946
1947        if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
1948                /* Once filtering is enabled, signal the PF to add its own
1949                 * LMAC address to the filter so packets for it are accepted.
1950                 */
1951                mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1952                mbx.xcast.data.mac = 0;
1953                nicvf_send_msg_to_pf(nic, &mbx);
1954        }
1955
1956        /* check if we have any specific MACs to be added to PF DMAC filter */
1957        if (vf_work->mc) {
1958                /* now go through kernel list of MACs and add them one by one */
1959                for (idx = 0; idx < vf_work->mc->count; idx++) {
1960                        mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
1961                        mbx.xcast.data.mac = vf_work->mc->mc[idx];
1962                        nicvf_send_msg_to_pf(nic, &mbx);
1963                }
1964                kfree(vf_work->mc);
1965        }
1966
1967        /* and finally set rx mode for PF accordingly */
1968        mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
1969        mbx.xcast.data.mode = vf_work->mode;
1970
1971        nicvf_send_msg_to_pf(nic, &mbx);
1972}
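
/* Resulting mailbox sequence for a typical multicast-filtered setup:
 * NIC_MBOX_MSG_RESET_XCAST, then NIC_MBOX_MSG_ADD_MCAST with mac = 0 for
 * the LMAC's own address, one NIC_MBOX_MSG_ADD_MCAST per address from the
 * kernel's mc list, and finally NIC_MBOX_MSG_SET_XCAST with the new mode.
 */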
1973
1974static void nicvf_set_rx_mode(struct net_device *netdev)
1975{
1976        struct nicvf *nic = netdev_priv(netdev);
1977        struct netdev_hw_addr *ha;
1978        struct xcast_addr_list *mc_list = NULL;
1979        u8 mode = 0;
1980
1981        if (netdev->flags & IFF_PROMISC) {
1982                mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
1983        } else {
1984                if (netdev->flags & IFF_BROADCAST)
1985                        mode |= BGX_XCAST_BCAST_ACCEPT;
1986
1987                if (netdev->flags & IFF_ALLMULTI) {
1988                        mode |= BGX_XCAST_MCAST_ACCEPT;
1989                } else if (netdev->flags & IFF_MULTICAST) {
1990                        mode |= BGX_XCAST_MCAST_FILTER;
1991                        /* here we need to copy mc addrs */
1992                        if (netdev_mc_count(netdev)) {
1993                                mc_list = kmalloc(offsetof(typeof(*mc_list),
1994                                                           mc[netdev_mc_count(netdev)]),
1995                                                  GFP_ATOMIC);
1996                                if (unlikely(!mc_list))
1997                                        return;
1998                                mc_list->count = 0;
1999                                netdev_hw_addr_list_for_each(ha, &netdev->mc) {
2000                                        mc_list->mc[mc_list->count] =
2001                                                ether_addr_to_u64(ha->addr);
2002                                        mc_list->count++;
2003                                }
2004                        }
2005                }
2006        }
2007        nic->rx_mode_work.mc = mc_list;
2008        nic->rx_mode_work.mode = mode;
2009        queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
2010}
2011
2012static const struct net_device_ops nicvf_netdev_ops = {
2013        .ndo_open               = nicvf_open,
2014        .ndo_stop               = nicvf_stop,
2015        .ndo_start_xmit         = nicvf_xmit,
2016        .ndo_change_mtu         = nicvf_change_mtu,
2017        .ndo_set_mac_address    = nicvf_set_mac_address,
2018        .ndo_get_stats64        = nicvf_get_stats64,
2019        .ndo_tx_timeout         = nicvf_tx_timeout,
2020        .ndo_fix_features       = nicvf_fix_features,
2021        .ndo_set_features       = nicvf_set_features,
2022        .ndo_bpf                = nicvf_xdp,
2023        .ndo_do_ioctl           = nicvf_ioctl,
2024        .ndo_set_rx_mode        = nicvf_set_rx_mode,
2025};
2026
2027static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2028{
2029        struct device *dev = &pdev->dev;
2030        struct net_device *netdev;
2031        struct nicvf *nic;
2032        int    err, qcount;
2033        u16    sdevid;
2034        struct cavium_ptp *ptp_clock;
2035
2036        ptp_clock = cavium_ptp_get();
2037        if (IS_ERR(ptp_clock)) {
2038                if (PTR_ERR(ptp_clock) == -ENODEV)
2039                        /* In a virtualized environment, proceed without PTP */
2040                        ptp_clock = NULL;
2041                else
2042                        return PTR_ERR(ptp_clock);
2043        }
2044
2045        err = pci_enable_device(pdev);
2046        if (err) {
2047                dev_err(dev, "Failed to enable PCI device\n");
2048                return err;
2049        }
2050
2051        err = pci_request_regions(pdev, DRV_NAME);
2052        if (err) {
2053                dev_err(dev, "PCI request regions failed 0x%x\n", err);
2054                goto err_disable_device;
2055        }
2056
2057        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
2058        if (err) {
2059                dev_err(dev, "Unable to get usable DMA configuration\n");
2060                goto err_release_regions;
2061        }
2062
2063        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
2064        if (err) {
2065                dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
2066                goto err_release_regions;
2067        }
2068
2069        qcount = netif_get_num_default_rss_queues();
2070
2071        /* Restrict multiqset support only for host bound VFs */
2072        if (pdev->is_virtfn) {
2073                /* Set max number of queues per VF */
2074                qcount = min_t(int, num_online_cpus(),
2075                               (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
2076        }
2077
2078        netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
2079        if (!netdev) {
2080                err = -ENOMEM;
2081                goto err_release_regions;
2082        }
2083
2084        pci_set_drvdata(pdev, netdev);
2085
2086        SET_NETDEV_DEV(netdev, &pdev->dev);
2087
2088        nic = netdev_priv(netdev);
2089        nic->netdev = netdev;
2090        nic->pdev = pdev;
2091        nic->pnicvf = nic;
2092        nic->max_queues = qcount;
2093        /* If the number of CPUs is too low, there won't be any queues
2094         * left for XDP_TX, hence double it.
2095         */
2096        if (!nic->t88)
2097                nic->max_queues *= 2;
2098        nic->ptp_clock = ptp_clock;
2099
2100        /* Map VF's configuration registers */
2101        nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
2102        if (!nic->reg_base) {
2103                dev_err(dev, "Cannot map config register space, aborting\n");
2104                err = -ENOMEM;
2105                goto err_free_netdev;
2106        }
2107
2108        nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
2109        if (!nic->drv_stats) {
2110                err = -ENOMEM;
2111                goto err_free_netdev;
2112        }
2113
2114        err = nicvf_set_qset_resources(nic);
2115        if (err)
2116                goto err_free_netdev;
2117
2118        /* Check if PF is alive and get MAC address for this VF */
2119        err = nicvf_register_misc_interrupt(nic);
2120        if (err)
2121                goto err_free_netdev;
2122
2123        nicvf_send_vf_struct(nic);
2124
2125        if (!pass1_silicon(nic->pdev))
2126                nic->hw_tso = true;
2127
2128        /* Get iommu domain for iova to physical addr conversion */
2129        nic->iommu_domain = iommu_get_domain_for_dev(dev);
2130
2131        pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
2132        if (sdevid == 0xA134)
2133                nic->t88 = true;
2134
2135        /* Check if this VF is in QS only mode */
2136        if (nic->sqs_mode)
2137                return 0;
2138
2139        err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
2140        if (err)
2141                goto err_unregister_interrupts;
2142
2143        netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
2144                               NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
2145                               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2146                               NETIF_F_HW_VLAN_CTAG_RX);
2147
2148        netdev->hw_features |= NETIF_F_RXHASH;
2149
2150        netdev->features |= netdev->hw_features;
2151        netdev->hw_features |= NETIF_F_LOOPBACK;
2152
2153        netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2154                                NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
2155
2156        netdev->netdev_ops = &nicvf_netdev_ops;
2157        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
2158
2159        /* MTU range: 64 - 9200 */
2160        netdev->min_mtu = NIC_HW_MIN_FRS;
2161        netdev->max_mtu = NIC_HW_MAX_FRS;
2162
2163        INIT_WORK(&nic->reset_task, nicvf_reset_task);
2164
2165        INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2166
2167        err = register_netdev(netdev);
2168        if (err) {
2169                dev_err(dev, "Failed to register netdevice\n");
2170                goto err_unregister_interrupts;
2171        }
2172
2173        nic->msg_enable = debug;
2174
2175        nicvf_set_ethtool_ops(netdev);
2176
2177        return 0;
2178
2179err_unregister_interrupts:
2180        nicvf_unregister_interrupts(nic);
2181err_free_netdev:
2182        pci_set_drvdata(pdev, NULL);
2183        if (nic->drv_stats)
2184                free_percpu(nic->drv_stats);
2185        free_netdev(netdev);
2186err_release_regions:
2187        pci_release_regions(pdev);
2188err_disable_device:
2189        pci_disable_device(pdev);
2190        return err;
2191}
2192
2193static void nicvf_remove(struct pci_dev *pdev)
2194{
2195        struct net_device *netdev = pci_get_drvdata(pdev);
2196        struct nicvf *nic;
2197        struct net_device *pnetdev;
2198
2199        if (!netdev)
2200                return;
2201
2202        nic = netdev_priv(netdev);
2203        pnetdev = nic->pnicvf->netdev;
2204
2205        cancel_delayed_work_sync(&nic->rx_mode_work.work);
2206
2207        /* Check if this Qset is assigned to a different VF.
2208         * If yes, clean up the primary and all secondary Qsets.
2209         */
2210        if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2211                unregister_netdev(pnetdev);
2212        nicvf_unregister_interrupts(nic);
2213        pci_set_drvdata(pdev, NULL);
2214        if (nic->drv_stats)
2215                free_percpu(nic->drv_stats);
2216        cavium_ptp_put(nic->ptp_clock);
2217        free_netdev(netdev);
2218        pci_release_regions(pdev);
2219        pci_disable_device(pdev);
2220}
2221
2222static void nicvf_shutdown(struct pci_dev *pdev)
2223{
2224        nicvf_remove(pdev);
2225}
2226
2227static struct pci_driver nicvf_driver = {
2228        .name = DRV_NAME,
2229        .id_table = nicvf_id_table,
2230        .probe = nicvf_probe,
2231        .remove = nicvf_remove,
2232        .shutdown = nicvf_shutdown,
2233};
2234
2235static int __init nicvf_init_module(void)
2236{
2237        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2238        nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
2239                                                   WQ_MEM_RECLAIM);
2240        return pci_register_driver(&nicvf_driver);
2241}
2242
2243static void __exit nicvf_cleanup_module(void)
2244{
2245        if (nicvf_rx_mode_wq) {
2246                destroy_workqueue(nicvf_rx_mode_wq);
2247                nicvf_rx_mode_wq = NULL;
2248        }
2249        pci_unregister_driver(&nicvf_driver);
2250}
2251
2252module_init(nicvf_init_module);
2253module_exit(nicvf_cleanup_module);
2254