linux/drivers/net/ethernet/intel/iavf/iavf_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) \
             DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
                                    struct iavf_dma_mem *mem,
                                    u64 size, u32 alignment)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

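        /* Note: only mem->size is rounded up to a multiple of @alignment
         * below; dma_alloc_coherent() itself returns memory that is at
         * least page aligned, which is expected to satisfy the shared
         * code's alignment requests.
         */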
        if (!mem)
                return I40E_ERR_PARAM;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (mem->va)
                return 0;
        else
                return I40E_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem || !mem->va)
                return I40E_ERR_PARAM;
        dma_free_coherent(&adapter->pdev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
                                     struct iavf_virt_mem *mem, u32 size)
{
        if (!mem)
                return I40E_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (mem->va)
                return 0;
        else
                return I40E_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
        if (!mem)
                return I40E_ERR_PARAM;

        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);

        return 0;
}

/**
 * iavf_debug_d - OS dependent version of debug printing
 * @hw:  pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
        char buf[512];
        va_list argptr;

        if (!(mask & ((struct iavf_hw *)hw)->debug_mask))
                return;

        va_start(argptr, fmt_str);
        vsnprintf(buf, sizeof(buf), fmt_str, argptr);
        va_end(argptr);

        /* the debug string is already formatted with a newline */
        pr_info("%s", buf);
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
                schedule_work(&adapter->reset_task);
        }
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void iavf_tx_timeout(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        adapter->tx_timeout_count++;
        iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

        iavf_flush(hw);

        synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
        int i;
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
        }
        iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

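        /* Vector 0 is reserved for the admin queue ("misc") interrupt, so
         * queue bit n of @mask corresponds to MSI-X vector n + 1.
         */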
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & BIT(i - 1)) {
                        wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
                             IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                             IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
                }
        }
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
        struct iavf_hw *hw = &adapter->hw;

        iavf_misc_irq_enable(adapter);
        iavf_irq_enable_queues(adapter, ~0);

        if (flush)
                iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
        struct net_device *netdev = data;
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;

        /* handle non-queue interrupts, these reads clear the registers */
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);

        /* schedule work on the private workqueue */
        schedule_work(&adapter->adminq_task);

        return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
        struct iavf_q_vector *q_vector = data;

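        /* Runs in hard-IRQ context with interrupts disabled, hence the
         * _irqoff variant of napi_schedule() below.
         */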
        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

        napi_schedule_irqoff(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
        struct iavf_hw *hw = &adapter->hw;

        rx_ring->q_vector = q_vector;
        rx_ring->next = q_vector->rx.ring;
        rx_ring->vsi = &adapter->vsi;
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
             q_vector->rx.current_itr);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
        struct iavf_hw *hw = &adapter->hw;

        tx_ring->q_vector = q_vector;
        tx_ring->next = q_vector->tx.ring;
        tx_ring->vsi = &adapter->vsi;
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
             q_vector->tx.target_itr);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
        int rings_remaining = adapter->num_active_queues;
        int ridx = 0, vidx = 0;
        int q_vectors;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

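        /* Example: with 8 queue pairs and 4 queue vectors, the loop below
         * assigns rings 0 and 4 to vector 0, rings 1 and 5 to vector 1,
         * and so on.
         */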
        for (; ridx < rings_remaining; ridx++) {
                iavf_map_vector_to_rxq(adapter, vidx, ridx);
                iavf_map_vector_to_txq(adapter, vidx, ridx);

                /* In the case where we have more queues than vectors, continue
                 * round-robin on vectors until all queues are mapped.
                 */
                if (++vidx >= q_vectors)
                        vidx = 0;
        }

        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        struct iavf_q_vector *q_vector =
                container_of(notify, struct iavf_q_vector, affinity_notify);

        cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for the non-queue (admin queue) vector */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

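        /* Each traffic vector is named after the device basename and its
         * role, e.g. "iavf-<basename>-TxRx-0", so the vectors can be told
         * apart in /proc/interrupts.
         */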
        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%d", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%d", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0,
                                  q_vector->name,
                                  q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release =
                                                   iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_set_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu.  On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (vlan == f->vlan)
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (!f)
                        goto clearout;

                f->vlan = vlan;

                INIT_LIST_HEAD(&f->list);
                list_add(&f->list, &adapter->vlan_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (!VLAN_ALLOWED(adapter))
                return -EIO;
        if (iavf_add_vlan(adapter, vid) == NULL)
                return -ENOMEM;
        return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (VLAN_ALLOWED(adapter)) {
                iavf_del_vlan(adapter, vid);
                return 0;
        }
        return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
                                  const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (ether_addr_equal(macaddr, f->macaddr))
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                 const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        f = iavf_find_filter(adapter, macaddr);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return f;

                ether_addr_copy(f->macaddr, macaddr);

                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
        }

        return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;

        if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
                return -EPERM;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

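        /* Mark the old HW address filter for removal and queue the new one;
         * both changes are pushed to the PF later from the watchdog via the
         * aq_required flags.
         */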
        f = iavf_find_filter(adapter, hw->mac.addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }

        f = iavf_add_filter(adapter, addr->sa_data);

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (f) {
                ether_addr_copy(hw->mac.addr, addr->sa_data);
                ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
        }

        return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (iavf_add_filter(adapter, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = iavf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);

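        /* Only ask the PF to toggle promiscuous/all-multicast mode when
         * the netdev flag and our cached IAVF_FLAG_*_ON state disagree.
         */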
        if (netdev->flags & IFF_PROMISC &&
            !(adapter->flags & IAVF_FLAG_PROMISC_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
        else if (!(netdev->flags & IFF_PROMISC) &&
                 adapter->flags & IAVF_FLAG_PROMISC_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

        if (netdev->flags & IFF_ALLMULTI &&
            !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
        else if (!(netdev->flags & IFF_ALLMULTI) &&
                 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;

                q_vector = &adapter->q_vectors[q_idx];
                napi = &q_vector->napi;
                napi_enable(napi);
        }
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                napi_disable(&q_vector->napi);
        }
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        iavf_set_rx_mode(netdev);

        iavf_configure_tx(adapter);
        iavf_configure_rx(adapter);
        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *ring = &adapter->rx_rings[i];

                iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
        }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
        adapter->state = __IAVF_RUNNING;
        clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_napi_enable_all(adapter);

        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
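        /* mod_timer_pending() only re-arms the watchdog if it is already
         * pending; a stopped watchdog is not restarted from here.
         */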
        mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_vlan_filter *vlf;
        struct iavf_mac_filter *f;
        struct iavf_cloud_filter *cf;

        if (adapter->state <= __IAVF_DOWN_PENDING)
                return;

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        adapter->link_up = false;
        iavf_napi_disable_all(adapter);
        iavf_irq_disable(adapter);

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* clear the sync flag on all filters */
        __dev_uc_unsync(adapter->netdev, NULL);
        __dev_mc_unsync(adapter->netdev, NULL);

        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }

        /* remove all VLAN filters */
        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
                vlf->remove = true;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* remove all cloud filters */
        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
                cf->del = true;
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
            adapter->state != __IAVF_RESETTING) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
                 * here for this to complete. The watchdog is still running
                 * and it will take care of this.
                 */
                adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }

        mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 0) Other (Admin Queue and link, mostly)
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
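        /* pci_enable_msix_range() returns the number of vectors actually
         * granted (between vector_threshold and vectors) or a negative
         * errno if even the minimum could not be allocated.
         */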
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
        if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return err;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NONQ_VECS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = err;
        return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
        if (!adapter->vsi_res)
                return;
        adapter->num_active_queues = 0;
        kfree(adapter->tx_rings);
        adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
        adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
        int i, num_active_queues;

        /* If we're in reset reallocating queues we don't actually know yet for
         * certain the PF gave us the number of queues we asked for but we'll
         * assume it did.  Once basic reset is finished we'll confirm once we
         * start negotiating config with PF.
         */
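        /* Queue count selection, in priority order: an explicit request
         * from the user, then the ADq channel total, then the smaller of
         * the PF-provided queue pairs and the number of online CPUs.
         */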
        if (adapter->num_req_queues)
                num_active_queues = adapter->num_req_queues;
        else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
                 adapter->num_tc)
                num_active_queues = adapter->ch_config.total_qps;
        else
                num_active_queues = min_t(int,
                                          adapter->vsi_res->num_queue_pairs,
                                          (int)(num_online_cpus()));

        adapter->tx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->tx_rings)
                goto err_out;
        adapter->rx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->rx_rings)
                goto err_out;

        for (i = 0; i < num_active_queues; i++) {
                struct iavf_ring *tx_ring;
                struct iavf_ring *rx_ring;

                tx_ring = &adapter->tx_rings[i];

                tx_ring->queue_index = i;
                tx_ring->netdev = adapter->netdev;
                tx_ring->dev = &adapter->pdev->dev;
                tx_ring->count = adapter->tx_desc_count;
                tx_ring->itr_setting = IAVF_ITR_TX_DEF;
                if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
                        tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

                rx_ring = &adapter->rx_rings[i];
                rx_ring->queue_index = i;
                rx_ring->netdev = adapter->netdev;
                rx_ring->dev = &adapter->pdev->dev;
                rx_ring->count = adapter->rx_desc_count;
                rx_ring->itr_setting = IAVF_ITR_RX_DEF;
        }

        adapter->num_active_queues = num_active_queues;

        return 0;

err_out:
        iavf_free_queues(adapter);
        return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
        int vector, v_budget;
        int pairs = 0;
        int err = 0;

        if (!adapter->vsi_res) {
                err = -EIO;
                goto out;
        }
        pairs = adapter->num_active_queues;

        /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
         * us much good if we have more vectors than CPUs. However, we already
         * limit the total number of queues by the number of CPUs so we do not
         * need any further limiting here.
         */
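        /* e.g. with 4 queue pairs this requests 4 + NONQ_VECS entries,
         * capped at the max_vectors advertised by the PF.
         */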
        v_budget = min_t(int, pairs + NONQ_VECS,
                         (int)adapter->vf_res->max_vectors);

        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
        netif_set_real_num_rx_queues(adapter->netdev, pairs);
        netif_set_real_num_tx_queues(adapter->netdev, pairs);
        return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
        struct i40e_aqc_get_set_rss_key_data *rss_key =
                (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
        struct iavf_hw *hw = &adapter->hw;
        int ret = 0;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
                        adapter->current_op);
                return -EBUSY;
        }

        ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
                return ret;
        }

        ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
                                  adapter->rss_lut, adapter->rss_lut_size);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
        }

        return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        u32 *dw;
        u16 i;

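        /* The RSS key and lut are stored as byte arrays; write them out
         * to the VF registers one 32-bit word at a time.
         */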
        dw = (u32 *)adapter->rss_key;
        for (i = 0; i <= adapter->rss_key_size / 4; i++)
                wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

        dw = (u32 *)adapter->rss_lut;
        for (i = 0; i <= adapter->rss_lut_size / 4; i++)
                wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

        iavf_flush(hw);

        return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
        if (RSS_PF(adapter)) {
                adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
                                        IAVF_FLAG_AQ_SET_RSS_KEY;
                return 0;
        } else if (RSS_AQ(adapter)) {
                return iavf_config_rss_aq(adapter);
        } else {
                return iavf_config_rss_reg(adapter);
        }
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
        u16 i;

        for (i = 0; i < adapter->rss_lut_size; i++)
                adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int ret;

        if (!RSS_PF(adapter)) {
                /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
                if (adapter->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
                        adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
                else
                        adapter->hena = IAVF_DEFAULT_RSS_HENA;

                wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
                wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
        }

        iavf_fill_rss_lut(adapter);
        netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
        ret = iavf_config_rss(adapter);

        return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx = 0, num_q_vectors;
        struct iavf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
                                     GFP_KERNEL);
        if (!adapter->q_vectors)
                return -ENOMEM;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                q_vector->adapter = adapter;
                q_vector->vsi = &adapter->vsi;
                q_vector->v_idx = q_idx;
                q_vector->reg_idx = q_idx;
                cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               iavf_napi_poll, NAPI_POLL_WEIGHT);
        }

        return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        int napi_vectors;

        if (!adapter->q_vectors)
                return;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        napi_vectors = adapter->num_active_queues;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

                if (q_idx < napi_vectors)
                        netif_napi_del(&q_vector->napi);
        }
        kfree(adapter->q_vectors);
        adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
        if (!adapter->msix_entries)
                return;

        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
        int err;

        err = iavf_alloc_queues(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        rtnl_lock();
        err = iavf_set_interrupt_capability(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }

        err = iavf_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        /* If we've made it this far with the ADq flag set, we haven't bailed
         * out anywhere along the way, and the ADq queue resources were
         * actually allocated in the reset path. Now we can truly claim that
         * ADq is enabled.
         */
        if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
            adapter->num_tc)
                dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created\n",
                         adapter->num_tc);

        dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
                 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
                 adapter->num_active_queues);

        return 0;
err_alloc_q_vectors:
        iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
        iavf_free_queues(adapter);
err_alloc_queues:
        return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
        kfree(adapter->rss_key);
        adapter->rss_key = NULL;

        kfree(adapter->rss_lut);
        adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (netif_running(netdev))
                iavf_free_traffic_irqs(adapter);
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);
        iavf_free_queues(adapter);

        err = iavf_init_interrupt_scheme(adapter);
        if (err)
                goto err;

        netif_tx_stop_all_queues(netdev);

        err = iavf_request_misc_irq(adapter);
        if (err)
                goto err;

        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_map_rings_to_vectors(adapter);

        if (RSS_AQ(adapter))
                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
        else
                err = iavf_init_rss(adapter);
err:
        return err;
}

/**
 * iavf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list embedded in our adapter structure
 **/
static void iavf_watchdog_timer(struct timer_list *t)
{
        struct iavf_adapter *adapter = from_timer(adapter, t,
                                                    watchdog_timer);

        schedule_work(&adapter->watchdog_task);
        /* timer will be rescheduled in watchdog task */
}

/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
        struct iavf_adapter *adapter = container_of(work,
                                                      struct iavf_adapter,
                                                      watchdog_task);
        struct iavf_hw *hw = &adapter->hw;
        u32 reg_val;

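        /* The watchdog is serialized against the reset task and other
         * critical work via the __IAVF_IN_CRITICAL_TASK bit; if another
         * task holds it, skip this pass and reschedule (restart_watchdog).
         */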
        if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
                goto restart_watchdog;

        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
                          IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
                if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
                    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
                        /* A chance for redemption! */
                        dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
                        adapter->state = __IAVF_STARTUP;
                        adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
                        schedule_delayed_work(&adapter->init_task, 10);
                        clear_bit(__IAVF_IN_CRITICAL_TASK,
                                  &adapter->crit_section);
                        /* Don't reschedule the watchdog, since we've restarted
                         * the init task. When init_task contacts the PF and
                         * gets everything set up again, it'll restart the
                         * watchdog for us. Down, boy. Sit. Stay. Woof.
                         */
                        return;
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                goto watchdog_done;
        }

        if ((adapter->state < __IAVF_DOWN) ||
            (adapter->flags & IAVF_FLAG_RESET_PENDING))
                goto watchdog_done;

        /* check for reset */
        reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
        if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
                adapter->state = __IAVF_RESETTING;
                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
                schedule_work(&adapter->reset_task);
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                goto watchdog_done;
        }

        /* Process admin queue tasks. After init, everything gets done
         * here so we don't race on the admin queue.
         */
        if (adapter->current_op) {
                if (!iavf_asq_done(hw)) {
                        dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
                        iavf_send_api_ver(adapter);
                }
                goto watchdog_done;
        }
        if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
                iavf_send_vf_config_msg(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
                iavf_disable_queues(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
                iavf_map_queues(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
                iavf_add_ether_addrs(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
                iavf_add_vlans(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
                iavf_del_ether_addrs(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
                iavf_del_vlans(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
                iavf_enable_vlan_stripping(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
                iavf_disable_vlan_stripping(adapter);
                goto watchdog_done;
        }

        if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1658                iavf_configure_queues(adapter);
1659                goto watchdog_done;
1660        }
1661
1662        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1663                iavf_enable_queues(adapter);
1664                goto watchdog_done;
1665        }
1666
1667        if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1668                /* This message goes straight to the firmware, not the
1669                 * PF, so we don't have to set current_op as we will
1670                 * not get a response through the ARQ.
1671                 */
1672                iavf_init_rss(adapter);
1673                adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1674                goto watchdog_done;
1675        }
1676        if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1677                iavf_get_hena(adapter);
1678                goto watchdog_done;
1679        }
1680        if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1681                iavf_set_hena(adapter);
1682                goto watchdog_done;
1683        }
1684        if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1685                iavf_set_rss_key(adapter);
1686                goto watchdog_done;
1687        }
1688        if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1689                iavf_set_rss_lut(adapter);
1690                goto watchdog_done;
1691        }
1692
1693        if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1694                iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1695                                       FLAG_VF_MULTICAST_PROMISC);
1696                goto watchdog_done;
1697        }
1698
1699        if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1700                iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1701                goto watchdog_done;
1702        }
1703
1704        if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
1705            (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1706                iavf_set_promiscuous(adapter, 0);
1707                goto watchdog_done;
1708        }
1709
1710        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1711                iavf_enable_channels(adapter);
1712                goto watchdog_done;
1713        }
1714
1715        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1716                iavf_disable_channels(adapter);
1717                goto watchdog_done;
1718        }
1719
1720        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1721                iavf_add_cloud_filter(adapter);
1722                goto watchdog_done;
1723        }
1724
1725        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1726                iavf_del_cloud_filter(adapter);
1727                goto watchdog_done;
1728        }
1729
1730        schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1731
1732        if (adapter->state == __IAVF_RUNNING)
1733                iavf_request_stats(adapter);
1734watchdog_done:
1735        if (adapter->state == __IAVF_RUNNING)
1736                iavf_detect_recover_hung(&adapter->vsi);
1737        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1738restart_watchdog:
1739        if (adapter->state == __IAVF_REMOVE)
1740                return;
1741        if (adapter->aq_required)
1742                mod_timer(&adapter->watchdog_timer,
1743                          jiffies + msecs_to_jiffies(20));
1744        else
1745                mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1746        schedule_work(&adapter->adminq_task);
1747}
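/* The dispatch above services at most one aq_required flag per pass and
 * jumps to watchdog_done, so a single virtchnl request is outstanding on
 * the admin queue at any time. Reduced to two hypothetical flags, the
 * shape is (illustrative sketch, not part of the driver):
 */
#if 0	/* example only */
static void example_watchdog(struct example_adapter *adapter)
{
	if (adapter->aq_required & EXAMPLE_FLAG_ADD_FILTERS) {
		example_send_add_filters(adapter);	/* flag cleared on ACK */
		goto done;				/* one request per pass */
	}
	if (adapter->aq_required & EXAMPLE_FLAG_DEL_FILTERS) {
		example_send_del_filters(adapter);
		goto done;
	}
done:
	/* re-arm quickly (20 ms) while work is pending, else every 2 s */
	mod_timer(&adapter->watchdog_timer,
		  jiffies + (adapter->aq_required ?
			     msecs_to_jiffies(20) : 2 * HZ));
}
#endif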
1748
1749static void iavf_disable_vf(struct iavf_adapter *adapter)
1750{
1751        struct iavf_mac_filter *f, *ftmp;
1752        struct iavf_vlan_filter *fv, *fvtmp;
1753        struct iavf_cloud_filter *cf, *cftmp;
1754
1755        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
1756
1757        /* We don't use netif_running() because it may be true prior to
1758         * ndo_open() returning, so we can't assume it means all our open
1759         * tasks have finished, since we're not holding the rtnl_lock here.
1760         */
1761        if (adapter->state == __IAVF_RUNNING) {
1762                set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1763                netif_carrier_off(adapter->netdev);
1764                netif_tx_disable(adapter->netdev);
1765                adapter->link_up = false;
1766                iavf_napi_disable_all(adapter);
1767                iavf_irq_disable(adapter);
1768                iavf_free_traffic_irqs(adapter);
1769                iavf_free_all_tx_resources(adapter);
1770                iavf_free_all_rx_resources(adapter);
1771        }
1772
1773        spin_lock_bh(&adapter->mac_vlan_list_lock);
1774
1775        /* Delete all of the filters */
1776        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
1777                list_del(&f->list);
1778                kfree(f);
1779        }
1780
1781        list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
1782                list_del(&fv->list);
1783                kfree(fv);
1784        }
1785
1786        spin_unlock_bh(&adapter->mac_vlan_list_lock);
1787
1788        spin_lock_bh(&adapter->cloud_filter_list_lock);
1789        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1790                list_del(&cf->list);
1791                kfree(cf);
1792                adapter->num_cloud_filters--;
1793        }
1794        spin_unlock_bh(&adapter->cloud_filter_list_lock);
1795
1796        iavf_free_misc_irq(adapter);
1797        iavf_reset_interrupt_capability(adapter);
1798        iavf_free_queues(adapter);
1799        iavf_free_q_vectors(adapter);
1800        kfree(adapter->vf_res);
1801        iavf_shutdown_adminq(&adapter->hw);
1802        adapter->netdev->flags &= ~IFF_UP;
1803        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1804        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1805        adapter->state = __IAVF_DOWN;
1806        wake_up(&adapter->down_waitqueue);
1807        dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
1808}
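/* The teardown above walks each filter list with list_for_each_entry_safe(),
 * which caches the next node so the current entry can be freed mid-walk;
 * plain list_for_each_entry() would touch freed memory. Generic form, with
 * hypothetical types (illustrative, not part of the driver):
 */
#if 0	/* example only */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_filter {
	struct list_head list;
};

static void example_flush(struct list_head *head, spinlock_t *lock)
{
	struct example_filter *f, *ftmp;

	spin_lock_bh(lock);	/* _bh variant: lists are also used in softirq */
	list_for_each_entry_safe(f, ftmp, head, list) {
		list_del(&f->list);
		kfree(f);	/* safe here: kfree() never sleeps */
	}
	spin_unlock_bh(lock);
}
#endif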
1809
1810#define IAVF_RESET_WAIT_MS 10
1811#define IAVF_RESET_WAIT_COUNT 500
1812/**
1813 * iavf_reset_task - Call-back task to handle hardware reset
1814 * @work: pointer to work_struct
1815 *
1816 * During reset we need to shut down and reinitialize the admin queue
1817 * before we can use it to communicate with the PF again. We also clear
1818 * and reinit the rings because that context is lost as well.
1819 **/
1820static void iavf_reset_task(struct work_struct *work)
1821{
1822        struct iavf_adapter *adapter = container_of(work,
1823                                                      struct iavf_adapter,
1824                                                      reset_task);
1825        struct virtchnl_vf_resource *vfres = adapter->vf_res;
1826        struct net_device *netdev = adapter->netdev;
1827        struct iavf_hw *hw = &adapter->hw;
1828        struct iavf_vlan_filter *vlf;
1829        struct iavf_cloud_filter *cf;
1830        struct iavf_mac_filter *f;
1831        u32 reg_val;
1832        int i = 0, err;
1833        bool running;
1834
1835        /* When device is being removed it doesn't make sense to run the reset
1836         * task, just return in such a case.
1837         */
1838        if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
1839                return;
1840
1841        while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
1842                                &adapter->crit_section))
1843                usleep_range(500, 1000);
1844        if (CLIENT_ENABLED(adapter)) {
1845                adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
1846                                    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
1847                                    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
1848                                    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
1849                cancel_delayed_work_sync(&adapter->client_task);
1850                iavf_notify_client_close(&adapter->vsi, true);
1851        }
1852        iavf_misc_irq_disable(adapter);
1853        if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
1854                adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
1855                /* Restart the AQ here. If we have been reset but didn't
1856                 * detect it, or if the PF had to reinit, our AQ will be hosed.
1857                 */
1858                iavf_shutdown_adminq(hw);
1859                iavf_init_adminq(hw);
1860                iavf_request_reset(adapter);
1861        }
1862        adapter->flags |= IAVF_FLAG_RESET_PENDING;
1863
1864        /* poll until we see the reset actually happen */
1865        for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
1866                reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
1867                          IAVF_VF_ARQLEN1_ARQENABLE_MASK;
1868                if (!reg_val)
1869                        break;
1870                usleep_range(5000, 10000);
1871        }
1872        if (i == IAVF_RESET_WAIT_COUNT) {
1873                dev_info(&adapter->pdev->dev, "Never saw reset\n");
1874                goto continue_reset; /* act like the reset happened */
1875        }
1876
1877        /* wait until the reset is complete and the PF is responding to us */
1878        for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
1879                /* sleep first to make sure a minimum wait time is met */
1880                msleep(IAVF_RESET_WAIT_MS);
1881
1882                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
1883                          IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1884                if (reg_val == VIRTCHNL_VFR_VFACTIVE)
1885                        break;
1886        }
1887
1888        pci_set_master(adapter->pdev);
1889
1890        if (i == IAVF_RESET_WAIT_COUNT) {
1891                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1892                        reg_val);
1893                iavf_disable_vf(adapter);
1894                clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
1895                return; /* Do not attempt to reinit. It's dead, Jim. */
1896        }
1897
1898continue_reset:
1899        /* We don't use netif_running() because it may be true prior to
1900         * ndo_open() returning, so we can't assume it means all our open
1901         * tasks have finished, since we're not holding the rtnl_lock here.
1902         */
1903        running = ((adapter->state == __IAVF_RUNNING) ||
1904                   (adapter->state == __IAVF_RESETTING));
1905
1906        if (running) {
1907                netif_carrier_off(netdev);
1908                netif_tx_stop_all_queues(netdev);
1909                adapter->link_up = false;
1910                iavf_napi_disable_all(adapter);
1911        }
1912        iavf_irq_disable(adapter);
1913
1914        adapter->state = __IAVF_RESETTING;
1915        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1916
1917        /* free the Tx/Rx rings and descriptors, might be better to just
1918         * re-use them sometime in the future
1919         */
1920        iavf_free_all_rx_resources(adapter);
1921        iavf_free_all_tx_resources(adapter);
1922
1923        adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
1924        /* kill and reinit the admin queue */
1925        iavf_shutdown_adminq(hw);
1926        adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1927        err = iavf_init_adminq(hw);
1928        if (err)
1929                dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
1930                         err);
1931        adapter->aq_required = 0;
1932
1933        if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
1934                err = iavf_reinit_interrupt_scheme(adapter);
1935                if (err)
1936                        goto reset_err;
1937        }
1938
1939        adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
1940        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
1941
1942        spin_lock_bh(&adapter->mac_vlan_list_lock);
1943
1944        /* re-add all MAC filters */
1945        list_for_each_entry(f, &adapter->mac_filter_list, list) {
1946                f->add = true;
1947        }
1948        /* re-add all VLAN filters */
1949        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
1950                vlf->add = true;
1951        }
1952
1953        spin_unlock_bh(&adapter->mac_vlan_list_lock);
1954
1955        /* check if TCs are running and re-add all cloud filters */
1956        spin_lock_bh(&adapter->cloud_filter_list_lock);
1957        if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1958            adapter->num_tc) {
1959                list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1960                        cf->add = true;
1961                }
1962        }
1963        spin_unlock_bh(&adapter->cloud_filter_list_lock);
1964
1965        adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1966        adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
1967        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1968        iavf_misc_irq_enable(adapter);
1969
1970        mod_timer(&adapter->watchdog_timer, jiffies + 2);
1971
1972        /* We were running when the reset started, so we need to restore some
1973         * state here.
1974         */
1975        if (running) {
1976                /* allocate transmit descriptors */
1977                err = iavf_setup_all_tx_resources(adapter);
1978                if (err)
1979                        goto reset_err;
1980
1981                /* allocate receive descriptors */
1982                err = iavf_setup_all_rx_resources(adapter);
1983                if (err)
1984                        goto reset_err;
1985
1986                if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
1987                        err = iavf_request_traffic_irqs(adapter, netdev->name);
1988                        if (err)
1989                                goto reset_err;
1990
1991                        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1992                }
1993
1994                iavf_configure(adapter);
1995
1996                iavf_up_complete(adapter);
1997
1998                iavf_irq_enable(adapter, true);
1999        } else {
2000                adapter->state = __IAVF_DOWN;
2001                wake_up(&adapter->down_waitqueue);
2002        }
2003        clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2004        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2005
2006        return;
2007reset_err:
2008        clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2009        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2010        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2011        iavf_close(netdev);
2012}
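/* Both reset loops above are bounded register polls: read, test, sleep,
 * give up after IAVF_RESET_WAIT_COUNT tries (roughly five seconds at
 * ~10 ms per iteration), after which the VF is declared dead and disabled.
 * The generic shape (illustrative sketch, hypothetical names):
 */
#if 0	/* example only */
static int example_poll_reset_done(struct example_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < 500; i++) {
		msleep(10);	/* enforce a minimum wait between reads */
		val = example_rd32(hw, EXAMPLE_RSTAT) & EXAMPLE_STATE_MASK;
		if (val == EXAMPLE_STATE_ACTIVE)
			return 0;
	}
	return -ETIMEDOUT;	/* caller disables the VF */
}
#endif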
2013
2014/**
2015 * iavf_adminq_task - worker thread to clean the admin queue
2016 * @work: pointer to work_struct containing our data
2017 **/
2018static void iavf_adminq_task(struct work_struct *work)
2019{
2020        struct iavf_adapter *adapter =
2021                container_of(work, struct iavf_adapter, adminq_task);
2022        struct iavf_hw *hw = &adapter->hw;
2023        struct i40e_arq_event_info event;
2024        enum virtchnl_ops v_op;
2025        iavf_status ret, v_ret;
2026        u32 val, oldval;
2027        u16 pending;
2028
2029        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2030                goto out;
2031
2032        event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2033        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2034        if (!event.msg_buf)
2035                goto out;
2036
2037        do {
2038                ret = iavf_clean_arq_element(hw, &event, &pending);
2039                v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2040                v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
2041
2042                if (ret || !v_op)
2043                        break; /* No event to process or error cleaning ARQ */
2044
2045                iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2046                                         event.msg_len);
2047                if (pending != 0)
2048                        memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2049        } while (pending);
2050
2051        if ((adapter->flags &
2052             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2053            adapter->state == __IAVF_RESETTING)
2054                goto freedom;
2055
2056        /* check for error indications */
2057        val = rd32(hw, hw->aq.arq.len);
2058        if (val == 0xdeadbeef) /* indicates device in reset */
2059                goto freedom;
2060        oldval = val;
2061        if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2062                dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2063                val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2064        }
2065        if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2066                dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2067                val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2068        }
2069        if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2070                dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2071                val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2072        }
2073        if (oldval != val)
2074                wr32(hw, hw->aq.arq.len, val);
2075
2076        val = rd32(hw, hw->aq.asq.len);
2077        oldval = val;
2078        if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2079                dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2080                val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2081        }
2082        if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2083                dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2084                val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2085        }
2086        if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2087                dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2088                val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2089        }
2090        if (oldval != val)
2091                wr32(hw, hw->aq.asq.len, val);
2092
2093freedom:
2094        kfree(event.msg_buf);
2095out:
2096        /* re-enable Admin queue interrupt cause */
2097        iavf_misc_irq_enable(adapter);
2098}
2099
2100/**
2101 * iavf_client_task - worker thread to perform client work
2102 * @work: pointer to work_struct containing our data
2103 *
2104 * This task handles client interactions. Because client calls can be
2105 * reentrant, we can't handle them in the watchdog.
2106 **/
2107static void iavf_client_task(struct work_struct *work)
2108{
2109        struct iavf_adapter *adapter =
2110                container_of(work, struct iavf_adapter, client_task.work);
2111
2112        /* If we can't get the client bit, just give up. We'll be rescheduled
2113         * later.
2114         */
2115
2116        if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
2117                return;
2118
2119        if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2120                iavf_client_subtask(adapter);
2121                adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2122                goto out;
2123        }
2124        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2125                iavf_notify_client_l2_params(&adapter->vsi);
2126                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2127                goto out;
2128        }
2129        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2130                iavf_notify_client_close(&adapter->vsi, false);
2131                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2132                goto out;
2133        }
2134        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2135                iavf_notify_client_open(&adapter->vsi);
2136                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2137        }
2138out:
2139        clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2140}
2141
2142/**
2143 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2144 * @adapter: board private structure
2145 *
2146 * Free all transmit software resources
2147 **/
2148void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2149{
2150        int i;
2151
2152        if (!adapter->tx_rings)
2153                return;
2154
2155        for (i = 0; i < adapter->num_active_queues; i++)
2156                if (adapter->tx_rings[i].desc)
2157                        iavf_free_tx_resources(&adapter->tx_rings[i]);
2158}
2159
2160/**
2161 * iavf_setup_all_tx_resources - allocate all queues Tx resources
2162 * @adapter: board private structure
2163 *
2164 * If this function returns with an error, then it's possible one or
2165 * more of the rings is populated (while the rest are not).  It is the
2166 * caller's duty to clean those orphaned rings.
2167 *
2168 * Return 0 on success, negative on failure
2169 **/
2170static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2171{
2172        int i, err = 0;
2173
2174        for (i = 0; i < adapter->num_active_queues; i++) {
2175                adapter->tx_rings[i].count = adapter->tx_desc_count;
2176                err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2177                if (!err)
2178                        continue;
2179                dev_err(&adapter->pdev->dev,
2180                        "Allocation for Tx Queue %u failed\n", i);
2181                break;
2182        }
2183
2184        return err;
2185}
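/* Per the comment above, a mid-loop allocation failure leaves the earlier
 * rings populated, and the caller unwinds with the matching free-all
 * helper, which skips rings whose desc pointer was never set. iavf_open()
 * below follows exactly this shape (excerpted for illustration):
 */
#if 0	/* example only */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;
	/* ... request IRQs, configure, bring the VSI up ... */
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
#endif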
2186
2187/**
2188 * iavf_setup_all_rx_resources - allocate all queues Rx resources
2189 * @adapter: board private structure
2190 *
2191 * If this function returns with an error, then it's possible one or
2192 * more of the rings is populated (while the rest are not).  It is the
2193 * caller's duty to clean those orphaned rings.
2194 *
2195 * Return 0 on success, negative on failure
2196 **/
2197static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2198{
2199        int i, err = 0;
2200
2201        for (i = 0; i < adapter->num_active_queues; i++) {
2202                adapter->rx_rings[i].count = adapter->rx_desc_count;
2203                err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2204                if (!err)
2205                        continue;
2206                dev_err(&adapter->pdev->dev,
2207                        "Allocation for Rx Queue %u failed\n", i);
2208                break;
2209        }
2210        return err;
2211}
2212
2213/**
2214 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2215 * @adapter: board private structure
2216 *
2217 * Free all receive software resources
2218 **/
2219void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2220{
2221        int i;
2222
2223        if (!adapter->rx_rings)
2224                return;
2225
2226        for (i = 0; i < adapter->num_active_queues; i++)
2227                if (adapter->rx_rings[i].desc)
2228                        iavf_free_rx_resources(&adapter->rx_rings[i]);
2229}
2230
2231/**
2232 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2233 * @adapter: board private structure
2234 * @max_tx_rate: max Tx bandwidth for a TC, in Mbps
2235 **/
2236static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2237                                      u64 max_tx_rate)
2238{
2239        int speed = 0, ret = 0;
2240
2241        switch (adapter->link_speed) {
2242        case I40E_LINK_SPEED_40GB:
2243                speed = 40000;
2244                break;
2245        case I40E_LINK_SPEED_25GB:
2246                speed = 25000;
2247                break;
2248        case I40E_LINK_SPEED_20GB:
2249                speed = 20000;
2250                break;
2251        case I40E_LINK_SPEED_10GB:
2252                speed = 10000;
2253                break;
2254        case I40E_LINK_SPEED_1GB:
2255                speed = 1000;
2256                break;
2257        case I40E_LINK_SPEED_100MB:
2258                speed = 100;
2259                break;
2260        default:
2261                break;
2262        }
2263
2264        if (max_tx_rate > speed) {
2265                dev_err(&adapter->pdev->dev,
2266                        "Invalid tx rate specified\n");
2267                ret = -EINVAL;
2268        }
2269
2270        return ret;
2271}
2272
2273/**
2274 * iavf_validate_ch_config - validate queue mapping info
2275 * @adapter: board private structure
2276 * @mqprio_qopt: queue parameters
2277 *
2278 * This function validates the queue channel config provided by the user.
2279 * Returns 0 on a valid config, negative otherwise.
2281 **/
2282static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2283                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
2284{
2285        u64 total_max_rate = 0;
2286        int i, num_qps = 0;
2287        u64 tx_rate = 0;
2288        int ret = 0;
2289
2290        if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2291            mqprio_qopt->qopt.num_tc < 1)
2292                return -EINVAL;
2293
2294        for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
2295                if (!mqprio_qopt->qopt.count[i] ||
2296                    mqprio_qopt->qopt.offset[i] != num_qps)
2297                        return -EINVAL;
2298                if (mqprio_qopt->min_rate[i]) {
2299                        dev_err(&adapter->pdev->dev,
2300                                "Invalid min tx rate specified (must be 0)\n");
2301                        return -EINVAL;
2302                }
2303                /* convert to Mbps */
2304                tx_rate = div_u64(mqprio_qopt->max_rate[i],
2305                                  IAVF_MBPS_DIVISOR);
2306                total_max_rate += tx_rate;
2307                num_qps += mqprio_qopt->qopt.count[i];
2308        }
2309        if (num_qps > IAVF_MAX_REQ_QUEUES)
2310                return -EINVAL;
2311
2312        ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2313        return ret;
2314}
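/* Worked example (illustrative; assumes IAVF_MBPS_DIVISOR is 125000, i.e.
 * bytes/s per Mbit/s): tc hands max_rate[] down in bytes per second, while
 * the PF and iavf_validate_tx_bandwidth() reason in Mbit/s, hence the
 * div_u64() above. A 10 Gbit/s cap arrives as 1250000000 bytes/s:
 */
#if 0	/* example only */
	u64 bytes_per_sec = 1250000000ULL;		/* 10 Gbit/s from tc */
	u64 mbps = div_u64(bytes_per_sec, 125000);	/* == 10000 Mbit/s */
#endif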
2315
2316/**
2317 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2318 * @adapter: board private structure
2319 **/
2320static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2321{
2322        struct iavf_cloud_filter *cf, *cftmp;
2323
2324        spin_lock_bh(&adapter->cloud_filter_list_lock);
2325        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2326                                 list) {
2327                list_del(&cf->list);
2328                kfree(cf);
2329                adapter->num_cloud_filters--;
2330        }
2331        spin_unlock_bh(&adapter->cloud_filter_list_lock);
2332}
2333
2334/**
2335 * __iavf_setup_tc - configure multiple traffic classes
2336 * @netdev: network interface device structure
2337 * @type_data: tc offload data
2338 *
2339 * This function processes the config information provided by the
2340 * user to configure traffic classes/queue channels and packages the
2341 * information to request the PF to setup traffic classes.
2342 *
2343 * Returns 0 on success.
2344 **/
2345static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2346{
2347        struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2348        struct iavf_adapter *adapter = netdev_priv(netdev);
2349        struct virtchnl_vf_resource *vfres = adapter->vf_res;
2350        u8 num_tc = 0, total_qps = 0;
2351        int ret = 0, netdev_tc = 0;
2352        u64 max_tx_rate;
2353        u16 mode;
2354        int i;
2355
2356        num_tc = mqprio_qopt->qopt.num_tc;
2357        mode = mqprio_qopt->mode;
2358
2359        /* delete queue channel */
2360        if (!mqprio_qopt->qopt.hw) {
2361                if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2362                        /* reset the tc configuration */
2363                        netdev_reset_tc(netdev);
2364                        adapter->num_tc = 0;
2365                        netif_tx_stop_all_queues(netdev);
2366                        netif_tx_disable(netdev);
2367                        iavf_del_all_cloud_filters(adapter);
2368                        adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2369                        goto exit;
2370                } else {
2371                        return -EINVAL;
2372                }
2373        }
2374
2375        /* add queue channel */
2376        if (mode == TC_MQPRIO_MODE_CHANNEL) {
2377                if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2378                        dev_err(&adapter->pdev->dev, "ADq not supported\n");
2379                        return -EOPNOTSUPP;
2380                }
2381                if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2382                        dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2383                        return -EINVAL;
2384                }
2385
2386                ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2387                if (ret)
2388                        return ret;
2389                /* Return if same TC config is requested */
2390                if (adapter->num_tc == num_tc)
2391                        return 0;
2392                adapter->num_tc = num_tc;
2393
2394                for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2395                        if (i < num_tc) {
2396                                adapter->ch_config.ch_info[i].count =
2397                                        mqprio_qopt->qopt.count[i];
2398                                adapter->ch_config.ch_info[i].offset =
2399                                        mqprio_qopt->qopt.offset[i];
2400                                total_qps += mqprio_qopt->qopt.count[i];
2401                                max_tx_rate = mqprio_qopt->max_rate[i];
2402                                /* convert to Mbps */
2403                                max_tx_rate = div_u64(max_tx_rate,
2404                                                      IAVF_MBPS_DIVISOR);
2405                                adapter->ch_config.ch_info[i].max_tx_rate =
2406                                        max_tx_rate;
2407                        } else {
2408                                adapter->ch_config.ch_info[i].count = 1;
2409                                adapter->ch_config.ch_info[i].offset = 0;
2410                        }
2411                }
2412                adapter->ch_config.total_qps = total_qps;
2413                netif_tx_stop_all_queues(netdev);
2414                netif_tx_disable(netdev);
2415                adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2416                netdev_reset_tc(netdev);
2417                /* Report the tc mapping up the stack */
2418                netdev_set_num_tc(adapter->netdev, num_tc);
2419                for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2420                        u16 qcount = mqprio_qopt->qopt.count[i];
2421                        u16 qoffset = mqprio_qopt->qopt.offset[i];
2422
2423                        if (i < num_tc)
2424                                netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2425                                                    qoffset);
2426                }
2427        }
2428exit:
2429        return ret;
2430}
2431
2432/**
2433 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2434 * @adapter: board private structure
2435 * @f: pointer to struct tc_cls_flower_offload
2436 * @filter: pointer to cloud filter structure
2437 */
2438static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2439                                 struct tc_cls_flower_offload *f,
2440                                 struct iavf_cloud_filter *filter)
2441{
2442        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
2443        struct flow_dissector *dissector = rule->match.dissector;
2444        u16 n_proto_mask = 0;
2445        u16 n_proto_key = 0;
2446        u8 field_flags = 0;
2447        u16 addr_type = 0;
2448        u16 n_proto = 0;
2449        int i = 0;
2450        struct virtchnl_filter *vf = &filter->f;
2451
2452        if (dissector->used_keys &
2453            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2454              BIT(FLOW_DISSECTOR_KEY_BASIC) |
2455              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2456              BIT(FLOW_DISSECTOR_KEY_VLAN) |
2457              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2458              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2459              BIT(FLOW_DISSECTOR_KEY_PORTS) |
2460              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2461                dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2462                        dissector->used_keys);
2463                return -EOPNOTSUPP;
2464        }
2465
2466        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2467                struct flow_match_enc_keyid match;
2468
2469                flow_rule_match_enc_keyid(rule, &match);
2470                if (match.mask->keyid != 0)
2471                        field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2472        }
2473
2474        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2475                struct flow_match_basic match;
2476
2477                flow_rule_match_basic(rule, &match);
2478                n_proto_key = ntohs(match.key->n_proto);
2479                n_proto_mask = ntohs(match.mask->n_proto);
2480
2481                if (n_proto_key == ETH_P_ALL) {
2482                        n_proto_key = 0;
2483                        n_proto_mask = 0;
2484                }
2485                n_proto = n_proto_key & n_proto_mask;
2486                if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2487                        return -EINVAL;
2488                if (n_proto == ETH_P_IPV6) {
2489                        /* specify flow type as TCP IPv6 */
2490                        vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2491                }
2492
2493                if (match.key->ip_proto != IPPROTO_TCP) {
2494                        dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2495                        return -EINVAL;
2496                }
2497        }
2498
2499        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2500                struct flow_match_eth_addrs match;
2501
2502                flow_rule_match_eth_addrs(rule, &match);
2503
2504                /* use is_broadcast and is_zero to check for all 0xff or 0 */
2505                if (!is_zero_ether_addr(match.mask->dst)) {
2506                        if (is_broadcast_ether_addr(match.mask->dst)) {
2507                                field_flags |= IAVF_CLOUD_FIELD_OMAC;
2508                        } else {
2509                                dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2510                                        match.mask->dst);
2511                                return I40E_ERR_CONFIG;
2512                        }
2513                }
2514
2515                if (!is_zero_ether_addr(match.mask->src)) {
2516                        if (is_broadcast_ether_addr(match.mask->src)) {
2517                                field_flags |= IAVF_CLOUD_FIELD_IMAC;
2518                        } else {
2519                                dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2520                                        match.mask->src);
2521                                return I40E_ERR_CONFIG;
2522                        }
2523                }
2524
2525                if (!is_zero_ether_addr(match.key->dst))
2526                        if (is_valid_ether_addr(match.key->dst) ||
2527                            is_multicast_ether_addr(match.key->dst)) {
2528                                /* set the mask if a valid dst_mac address */
2529                                for (i = 0; i < ETH_ALEN; i++)
2530                                        vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2531                                ether_addr_copy(vf->data.tcp_spec.dst_mac,
2532                                                match.key->dst);
2533                        }
2534
2535                if (!is_zero_ether_addr(match.key->src))
2536                        if (is_valid_ether_addr(match.key->src) ||
2537                            is_multicast_ether_addr(match.key->src)) {
2538                                /* set the mask if a valid src_mac address */
2539                                for (i = 0; i < ETH_ALEN; i++)
2540                                        vf->mask.tcp_spec.src_mac[i] |= 0xff;
2541                                ether_addr_copy(vf->data.tcp_spec.src_mac,
2542                                                match.key->src);
2543                }
2544        }
2545
2546        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2547                struct flow_match_vlan match;
2548
2549                flow_rule_match_vlan(rule, &match);
2550                if (match.mask->vlan_id) {
2551                        if (match.mask->vlan_id == VLAN_VID_MASK) {
2552                                field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2553                        } else {
2554                                dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2555                                        match.mask->vlan_id);
2556                                return I40E_ERR_CONFIG;
2557                        }
2558                }
2559                vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2560                vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2561        }
2562
2563        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2564                struct flow_match_control match;
2565
2566                flow_rule_match_control(rule, &match);
2567                addr_type = match.key->addr_type;
2568        }
2569
2570        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2571                struct flow_match_ipv4_addrs match;
2572
2573                flow_rule_match_ipv4_addrs(rule, &match);
2574                if (match.mask->dst) {
2575                        if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2576                                field_flags |= IAVF_CLOUD_FIELD_IIP;
2577                        } else {
2578                                dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2579                                        be32_to_cpu(match.mask->dst));
2580                                return I40E_ERR_CONFIG;
2581                        }
2582                }
2583
2584                if (match.mask->src) {
2585                        if (match.mask->src == cpu_to_be32(0xffffffff)) {
2586                                field_flags |= IAVF_CLOUD_FIELD_IIP;
2587                        } else {
2588                                dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2589                                        be32_to_cpu(match.mask->src));
2590                                return I40E_ERR_CONFIG;
2591                        }
2592                }
2593
2594                if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2595                        dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2596                        return I40E_ERR_CONFIG;
2597                }
2598                if (match.key->dst) {
2599                        vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2600                        vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2601                }
2602                if (match.key->src) {
2603                        vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2604                        vf->data.tcp_spec.src_ip[0] = match.key->src;
2605                }
2606        }
2607
2608        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2609                struct flow_match_ipv6_addrs match;
2610
2611                flow_rule_match_ipv6_addrs(rule, &match);
2612
2613                /* validate mask, make sure it is not IPV6_ADDR_ANY */
2614                if (ipv6_addr_any(&match.mask->dst)) {
2615                        dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2616                                IPV6_ADDR_ANY);
2617                        return I40E_ERR_CONFIG;
2618                }
2619
2620                /* src and dest IPv6 address should not be LOOPBACK
2621                 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2622                 */
2623                if (ipv6_addr_loopback(&match.key->dst) ||
2624                    ipv6_addr_loopback(&match.key->src)) {
2625                        dev_err(&adapter->pdev->dev,
2626                                "ipv6 addr should not be loopback\n");
2627                        return I40E_ERR_CONFIG;
2628                }
2629                if (!ipv6_addr_any(&match.mask->dst) ||
2630                    !ipv6_addr_any(&match.mask->src))
2631                        field_flags |= IAVF_CLOUD_FIELD_IIP;
2632
2633                for (i = 0; i < 4; i++)
2634                        vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2635                memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2636                       sizeof(vf->data.tcp_spec.dst_ip));
2637                for (i = 0; i < 4; i++)
2638                        vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2639                memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2640                       sizeof(vf->data.tcp_spec.src_ip));
2641        }
2642        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2643                struct flow_match_ports match;
2644
2645                flow_rule_match_ports(rule, &match);
2646                if (match.mask->src) {
2647                        if (match.mask->src == cpu_to_be16(0xffff)) {
2648                                field_flags |= IAVF_CLOUD_FIELD_IIP;
2649                        } else {
2650                                dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2651                                        be16_to_cpu(match.mask->src));
2652                                return I40E_ERR_CONFIG;
2653                        }
2654                }
2655
2656                if (match.mask->dst) {
2657                        if (match.mask->dst == cpu_to_be16(0xffff)) {
2658                                field_flags |= IAVF_CLOUD_FIELD_IIP;
2659                        } else {
2660                                dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2661                                        be16_to_cpu(match.mask->dst));
2662                                return I40E_ERR_CONFIG;
2663                        }
2664                }
2665                if (match.key->dst) {
2666                        vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2667                        vf->data.tcp_spec.dst_port = match.key->dst;
2668                }
2669
2670                if (match.key->src) {
2671                        vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2672                        vf->data.tcp_spec.src_port = match.key->src;
2673                }
2674        }
2675        vf->field_flags = field_flags;
2676
2677        return 0;
2678}
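/* Note on the mask handling above (illustrative): the parser only offloads
 * exact matches. For each field the mask must be all-zero (field ignored)
 * or all-ones (field compared in full); anything in between is rejected
 * with I40E_ERR_CONFIG. For MAC addresses that check uses the standard
 * etherdevice.h helpers:
 */
#if 0	/* example only */
	if (is_zero_ether_addr(mask))
		;			/* field not part of the match */
	else if (is_broadcast_ether_addr(mask))
		;			/* ff:ff:ff:ff:ff:ff -> exact match */
	else
		return I40E_ERR_CONFIG;	/* partial masks unsupported */
#endif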
2679
2680/**
2681 * iavf_handle_tclass - Forward to a traffic class on the device
2682 * @adapter: board private structure
2683 * @tc: traffic class index on the device
2684 * @filter: pointer to cloud filter structure
2685 */
2686static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2687                              struct iavf_cloud_filter *filter)
2688{
2689        if (tc == 0)
2690                return 0;
2691        if (tc < adapter->num_tc) {
2692                if (!filter->f.data.tcp_spec.dst_port) {
2693                        dev_err(&adapter->pdev->dev,
2694                                "Specify destination port to redirect to traffic class other than TC0\n");
2695                        return -EINVAL;
2696                }
2697        }
2698        /* redirect to a traffic class on the same device */
2699        filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2700        filter->f.action_meta = tc;
2701        return 0;
2702}
2703
2704/**
2705 * iavf_configure_clsflower - Add tc flower filters
2706 * @adapter: board private structure
2707 * @cls_flower: Pointer to struct tc_cls_flower_offload
2708 */
2709static int iavf_configure_clsflower(struct iavf_adapter *adapter,
2710                                    struct tc_cls_flower_offload *cls_flower)
2711{
2712        int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2713        struct iavf_cloud_filter *filter = NULL;
2714        int err = -EINVAL, count = 50;
2715
2716        if (tc < 0) {
2717                dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
2718                return -EINVAL;
2719        }
2720
2721        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2722        if (!filter)
2723                return -ENOMEM;
2724
2725        while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2726                                &adapter->crit_section)) {
2727                if (--count == 0)
2728                        goto err;
2729                udelay(1);
2730        }
2731
2732        filter->cookie = cls_flower->cookie;
2733
2734        /* set the mask to all zeroes to begin with */
2735        memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
2736        /* start with flow type and eth type IPv4 */
2737        filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2738        err = iavf_parse_cls_flower(adapter, cls_flower, filter);
2739        if (err < 0)
2740                goto err;
2741
2742        err = iavf_handle_tclass(adapter, tc, filter);
2743        if (err < 0)
2744                goto err;
2745
2746        /* add filter to the list */
2747        spin_lock_bh(&adapter->cloud_filter_list_lock);
2748        list_add_tail(&filter->list, &adapter->cloud_filter_list);
2749        adapter->num_cloud_filters++;
2750        filter->add = true;
2751        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2752        spin_unlock_bh(&adapter->cloud_filter_list_lock);
2753err:
2754        if (err)
2755                kfree(filter);
2756
2757        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2758        return err;
2759}
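/* Unlike iavf_open()/iavf_close() below, which sleep as long as needed for
 * the crit_section bit, the flower-add path above bounds its wait to about
 * 50 us (50 tries x udelay(1)) and fails the request instead of stalling
 * the tc callback. Generic form (illustrative, hypothetical names):
 */
#if 0	/* example only */
	int count = 50;

	while (test_and_set_bit(EXAMPLE_BUSY, &example_flags)) {
		if (--count == 0)
			return -EAGAIN;	/* give up; user space may retry */
		udelay(1);
	}
	/* ... critical section ... */
	clear_bit(EXAMPLE_BUSY, &example_flags);
#endif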
2760
2761/* iavf_find_cf - Find the cloud filter in the list
2762 * @adapter: Board private structure
2763 * @cookie: filter specific cookie
2764 *
2765 * Returns ptr to the filter object or NULL. Must be called while holding the
2766 * cloud_filter_list_lock.
2767 */
2768static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
2769                                              unsigned long *cookie)
2770{
2771        struct iavf_cloud_filter *filter = NULL;
2772
2773        if (!cookie)
2774                return NULL;
2775
2776        list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
2777                if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
2778                        return filter;
2779        }
2780        return NULL;
2781}
2782
2783/**
2784 * iavf_delete_clsflower - Remove tc flower filters
2785 * @adapter: board private structure
2786 * @cls_flower: Pointer to struct tc_cls_flower_offload
2787 */
2788static int iavf_delete_clsflower(struct iavf_adapter *adapter,
2789                                 struct tc_cls_flower_offload *cls_flower)
2790{
2791        struct iavf_cloud_filter *filter = NULL;
2792        int err = 0;
2793
2794        spin_lock_bh(&adapter->cloud_filter_list_lock);
2795        filter = iavf_find_cf(adapter, &cls_flower->cookie);
2796        if (filter) {
2797                filter->del = true;
2798                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
2799        } else {
2800                err = -EINVAL;
2801        }
2802        spin_unlock_bh(&adapter->cloud_filter_list_lock);
2803
2804        return err;
2805}
2806
2807/**
2808 * iavf_setup_tc_cls_flower - flower classifier offloads
2809 * @adapter: board private structure
2810 * @cls_flower: pointer to struct tc_cls_flower_offload
2811 */
2812static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
2813                                    struct tc_cls_flower_offload *cls_flower)
2814{
2815        if (cls_flower->common.chain_index)
2816                return -EOPNOTSUPP;
2817
2818        switch (cls_flower->command) {
2819        case TC_CLSFLOWER_REPLACE:
2820                return iavf_configure_clsflower(adapter, cls_flower);
2821        case TC_CLSFLOWER_DESTROY:
2822                return iavf_delete_clsflower(adapter, cls_flower);
2823        case TC_CLSFLOWER_STATS:
2824                return -EOPNOTSUPP;
2825        default:
2826                return -EOPNOTSUPP;
2827        }
2828}
2829
2830/**
2831 * iavf_setup_tc_block_cb - block callback for tc
2832 * @type: type of offload
2833 * @type_data: offload data
2834 * @cb_priv: adapter private data, registered with the block
2835 *
2836 * This function is the block callback for traffic classes
2837 **/
2838static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2839                                  void *cb_priv)
2840{
2841        switch (type) {
2842        case TC_SETUP_CLSFLOWER:
2843                return iavf_setup_tc_cls_flower(cb_priv, type_data);
2844        default:
2845                return -EOPNOTSUPP;
2846        }
2847}
2848
2849/**
2850 * iavf_setup_tc_block - register callbacks for tc
2851 * @dev: network interface device structure
2852 * @f: tc offload data
2853 *
2854 * This function registers block callbacks for tc
2855 * offloads
2856 **/
2857static int iavf_setup_tc_block(struct net_device *dev,
2858                               struct tc_block_offload *f)
2859{
2860        struct iavf_adapter *adapter = netdev_priv(dev);
2861
2862        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2863                return -EOPNOTSUPP;
2864
2865        switch (f->command) {
2866        case TC_BLOCK_BIND:
2867                return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
2868                                             adapter, adapter, f->extack);
2869        case TC_BLOCK_UNBIND:
2870                tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
2871                                        adapter);
2872                return 0;
2873        default:
2874                return -EOPNOTSUPP;
2875        }
2876}
2877
2878/**
2879 * iavf_setup_tc - configure multiple traffic classes
2880 * @netdev: network interface device structure
2881 * @type: type of offload
2882 * @type_data: tc offload data
2883 *
2884 * This function is the callback to ndo_setup_tc in the
2885 * netdev_ops.
2886 *
2887 * Returns 0 on success
2888 **/
2889static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2890                         void *type_data)
2891{
2892        switch (type) {
2893        case TC_SETUP_QDISC_MQPRIO:
2894                return __iavf_setup_tc(netdev, type_data);
2895        case TC_SETUP_BLOCK:
2896                return iavf_setup_tc_block(netdev, type_data);
2897        default:
2898                return -EOPNOTSUPP;
2899        }
2900}
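/* For reference (a sketch; the driver's actual net_device_ops table lives
 * elsewhere in this file): iavf_setup_tc() is reached through the
 * .ndo_setup_tc hook, alongside the open/close entry points defined below:
 */
#if 0	/* example only */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= iavf_open,
	.ndo_stop	= iavf_close,
	.ndo_setup_tc	= iavf_setup_tc,
};
#endif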
2901
2902/**
2903 * iavf_open - Called when a network interface is made active
2904 * @netdev: network interface device structure
2905 *
2906 * Returns 0 on success, negative value on failure
2907 *
2908 * The open entry point is called when a network interface is made
2909 * active by the system (IFF_UP).  At this point all resources needed
2910 * for transmit and receive operations are allocated, the interrupt
2911 * handler is registered with the OS, the watchdog timer is started,
2912 * and the stack is notified that the interface is ready.
2913 **/
2914static int iavf_open(struct net_device *netdev)
2915{
2916        struct iavf_adapter *adapter = netdev_priv(netdev);
2917        int err;
2918
2919        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
2920                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2921                return -EIO;
2922        }
2923
2924        while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2925                                &adapter->crit_section))
2926                usleep_range(500, 1000);
2927
2928        if (adapter->state != __IAVF_DOWN) {
2929                err = -EBUSY;
2930                goto err_unlock;
2931        }
2932
2933        /* allocate transmit descriptors */
2934        err = iavf_setup_all_tx_resources(adapter);
2935        if (err)
2936                goto err_setup_tx;
2937
2938        /* allocate receive descriptors */
2939        err = iavf_setup_all_rx_resources(adapter);
2940        if (err)
2941                goto err_setup_rx;
2942
2943        /* clear any pending interrupts, may auto mask */
2944        err = iavf_request_traffic_irqs(adapter, netdev->name);
2945        if (err)
2946                goto err_req_irq;
2947
2948        spin_lock_bh(&adapter->mac_vlan_list_lock);
2949
2950        iavf_add_filter(adapter, adapter->hw.mac.addr);
2951
2952        spin_unlock_bh(&adapter->mac_vlan_list_lock);
2953
2954        iavf_configure(adapter);
2955
2956        iavf_up_complete(adapter);
2957
2958        iavf_irq_enable(adapter, true);
2959
2960        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2961
2962        return 0;
2963
2964err_req_irq:
2965        iavf_down(adapter);
2966        iavf_free_traffic_irqs(adapter);
2967err_setup_rx:
2968        iavf_free_all_rx_resources(adapter);
2969err_setup_tx:
2970        iavf_free_all_tx_resources(adapter);
2971err_unlock:
2972        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2973
2974        return err;
2975}
2976
2977/**
2978 * iavf_close - Disables a network interface
2979 * @netdev: network interface device structure
2980 *
2981 * Returns 0; this is not allowed to fail
2982 *
2983 * The close entry point is called when an interface is de-activated
2984 * by the OS.  The hardware is still under the driver's control, but
2985 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2986 * are freed, along with all transmit and receive resources.
2987 **/
2988static int iavf_close(struct net_device *netdev)
2989{
2990        struct iavf_adapter *adapter = netdev_priv(netdev);
2991        int status;
2992
2993        if (adapter->state <= __IAVF_DOWN_PENDING)
2994                return 0;
2995
2996        while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2997                                &adapter->crit_section))
2998                usleep_range(500, 1000);
2999
3000        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3001        if (CLIENT_ENABLED(adapter))
3002                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3003
3004        iavf_down(adapter);
3005        adapter->state = __IAVF_DOWN_PENDING;
3006        iavf_free_traffic_irqs(adapter);
3007
3008        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3009
3010        /* We explicitly don't free resources here because the hardware is
3011         * still active and can DMA into memory. Resources are cleared in
3012         * iavf_virtchnl_completion() after we get confirmation from the PF
3013         * driver that the rings have been stopped.
3014         *
3015         * Also, we wait for state to transition to __IAVF_DOWN before
3016         * returning. State change occurs in iavf_virtchnl_completion() after
3017 * VF resources are released (which occurs after the PF driver processes and
3018         * responds to admin queue commands).
3019         */
3020
3021        status = wait_event_timeout(adapter->down_waitqueue,
3022                                    adapter->state == __IAVF_DOWN,
3023                                    msecs_to_jiffies(200));
3024        if (!status)
3025                netdev_warn(netdev, "Device resources not yet released\n");
3026        return 0;
3027}
3028
3029/**
3030 * iavf_change_mtu - Change the Maximum Transfer Unit
3031 * @netdev: network interface device structure
3032 * @new_mtu: new value for maximum frame size
3033 *
3034 * Returns 0 on success, negative on failure
3035 **/
3036static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3037{
3038        struct iavf_adapter *adapter = netdev_priv(netdev);
3039
3040        netdev->mtu = new_mtu;
3041        if (CLIENT_ENABLED(adapter)) {
3042                iavf_notify_client_l2_params(&adapter->vsi);
3043                adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3044        }
3045        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3046        schedule_work(&adapter->reset_task);
3047
3048        return 0;
3049}
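
/* Illustrative usage: an MTU change such as
 *
 *   ip link set dev <vf-netdev> mtu 9000
 *
 * reaches this handler through ndo_change_mtu.  The new MTU only takes
 * full effect after the reset task scheduled above has rebuilt the
 * rings; the valid range itself (68 - 9710) is enforced by the core
 * against netdev->min_mtu/max_mtu, which are set during init.
 */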
3050
3051/**
3052 * iavf_set_features - set the netdev feature flags
3053 * @netdev: ptr to the netdev being adjusted
3054 * @features: the feature set that the stack is suggesting
3055 * Note: expects to be called while under rtnl_lock()
3056 **/
3057static int iavf_set_features(struct net_device *netdev,
3058                             netdev_features_t features)
3059{
3060        struct iavf_adapter *adapter = netdev_priv(netdev);
3061
3062        /* Don't allow changing VLAN_RX flag when adapter is not capable
3063         * of VLAN offload
3064         */
3065        if (!VLAN_ALLOWED(adapter)) {
3066                if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3067                        return -EINVAL;
3068        } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3069                if (features & NETIF_F_HW_VLAN_CTAG_RX)
3070                        adapter->aq_required |=
3071                                IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3072                else
3073                        adapter->aq_required |=
3074                                IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3075        }
3076
3077        return 0;
3078}
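
/* Illustrative usage ("rxvlan" is the usual ethtool shorthand for
 * rx-vlan-offload):
 *
 *   ethtool -K <vf-netdev> rxvlan off
 *
 * lands here with NETIF_F_HW_VLAN_CTAG_RX cleared and, on a
 * VLAN-capable adapter, queues IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING for
 * the driver's next pass through its admin queue request handling.
 */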
3079
3080/**
3081 * iavf_features_check - Validate encapsulated packet conforms to limits
3082 * @skb: skb buff
3083 * @dev: This physical port's netdev
3084 * @features: Offload features that the stack believes apply
3085 **/
3086static netdev_features_t iavf_features_check(struct sk_buff *skb,
3087                                             struct net_device *dev,
3088                                             netdev_features_t features)
3089{
3090        size_t len;
3091
3092        /* No point in doing any of this if neither checksum nor GSO are
3093         * being requested for this frame.  We can rule out both by just
3094         * checking for CHECKSUM_PARTIAL
3095         */
3096        if (skb->ip_summed != CHECKSUM_PARTIAL)
3097                return features;
3098
3099        /* We cannot support GSO if the MSS is going to be less than
3100         * 64 bytes.  If it is then we need to drop support for GSO.
3101         */
3102        if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3103                features &= ~NETIF_F_GSO_MASK;
3104
3105        /* MACLEN can support at most 63 words */
3106        len = skb_network_header(skb) - skb->data;
3107        if (len & ~(63 * 2))
3108                goto out_err;
3109
3110        /* IPLEN and EIPLEN can support at most 127 dwords */
3111        len = skb_transport_header(skb) - skb_network_header(skb);
3112        if (len & ~(127 * 4))
3113                goto out_err;
3114
3115        if (skb->encapsulation) {
3116                /* L4TUNLEN can support at most 127 words */
3117                len = skb_inner_network_header(skb) - skb_transport_header(skb);
3118                if (len & ~(127 * 2))
3119                        goto out_err;
3120
3121                /* IPLEN can support at most 127 dwords */
3122                len = skb_inner_transport_header(skb) -
3123                      skb_inner_network_header(skb);
3124                if (len & ~(127 * 4))
3125                        goto out_err;
3126        }
3127
3128        /* No need to validate L4LEN as TCP is the only protocol with a
3129         * flexible value and we support all possible values supported
3130         * by TCP, which is at most 15 dwords
3131         */
3132
3133        return features;
3134out_err:
3135        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3136}
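
/* Worked example of the bitmask checks above, using the MACLEN case:
 * the header length is carried in a 6-bit field counting 2-byte words,
 * so the largest representable L2 header is 63 * 2 = 126 bytes, and
 * "len & ~(63 * 2)" is nonzero whenever len is odd or exceeds 126:
 *
 *   len = 14 (plain Ethernet):  14 & ~126 == 0    -> offloads kept
 *   len = 127:                 127 & ~126 == 1    -> out_err
 *   len = 128:                 128 & ~126 == 128  -> out_err
 *
 * The IPLEN/EIPLEN and L4TUNLEN checks follow the same pattern with
 * 4-byte and 2-byte units respectively.
 */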
3137
3138/**
3139 * iavf_fix_features - fix up the netdev feature bits
3140 * @netdev: our net device
3141 * @features: desired feature bits
3142 *
3143 * Returns fixed-up features bits
3144 **/
3145static netdev_features_t iavf_fix_features(struct net_device *netdev,
3146                                           netdev_features_t features)
3147{
3148        struct iavf_adapter *adapter = netdev_priv(netdev);
3149
3150        if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3151                features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3152                              NETIF_F_HW_VLAN_CTAG_RX |
3153                              NETIF_F_HW_VLAN_CTAG_FILTER);
3154
3155        return features;
3156}
3157
3158static const struct net_device_ops iavf_netdev_ops = {
3159        .ndo_open               = iavf_open,
3160        .ndo_stop               = iavf_close,
3161        .ndo_start_xmit         = iavf_xmit_frame,
3162        .ndo_set_rx_mode        = iavf_set_rx_mode,
3163        .ndo_validate_addr      = eth_validate_addr,
3164        .ndo_set_mac_address    = iavf_set_mac,
3165        .ndo_change_mtu         = iavf_change_mtu,
3166        .ndo_tx_timeout         = iavf_tx_timeout,
3167        .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
3168        .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
3169        .ndo_features_check     = iavf_features_check,
3170        .ndo_fix_features       = iavf_fix_features,
3171        .ndo_set_features       = iavf_set_features,
3172        .ndo_setup_tc           = iavf_setup_tc,
3173};
3174
3175/**
3176 * iavf_check_reset_complete - check that VF reset is complete
3177 * @hw: pointer to hw struct
3178 *
3179 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3180 **/
3181static int iavf_check_reset_complete(struct iavf_hw *hw)
3182{
3183        u32 rstat;
3184        int i;
3185
3186        for (i = 0; i < 100; i++) {
3187                rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3188                             IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3189                if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3190                    (rstat == VIRTCHNL_VFR_COMPLETED))
3191                        return 0;
3192                usleep_range(10, 20);
3193        }
3194        return -EBUSY;
3195}
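
/* Note on the poll budget above: 100 iterations with a 10-20 us sleep
 * per pass gives the VF roughly 1-2 ms (plus scheduling slack) to come
 * out of reset before -EBUSY is returned.
 */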
3196
3197/**
3198 * iavf_process_config - Process the config information we got from the PF
3199 * @adapter: board private structure
3200 *
3201 * Verify that we have a valid config struct, and set up our netdev features
3202 * and our VSI struct.
3203 **/
3204int iavf_process_config(struct iavf_adapter *adapter)
3205{
3206        struct virtchnl_vf_resource *vfres = adapter->vf_res;
3207        int i, num_req_queues = adapter->num_req_queues;
3208        struct net_device *netdev = adapter->netdev;
3209        struct iavf_vsi *vsi = &adapter->vsi;
3210        netdev_features_t hw_enc_features;
3211        netdev_features_t hw_features;
3212
3213        /* got VF config message back from PF, now we can parse it */
3214        for (i = 0; i < vfres->num_vsis; i++) {
3215                if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3216                        adapter->vsi_res = &vfres->vsi_res[i];
3217        }
3218        if (!adapter->vsi_res) {
3219                dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3220                return -ENODEV;
3221        }
3222
3223        if (num_req_queues &&
3224            num_req_queues != adapter->vsi_res->num_queue_pairs) {
3225                /* Problem: the PF gave us a different number of queues
3226                 * than we negotiated in our request.  We need a reset to
3227                 * get back to a working state.
3228                 */
3229                dev_err(&adapter->pdev->dev,
3230                        "Requested %d queues, but PF only gave us %d.\n",
3231                        num_req_queues,
3232                        adapter->vsi_res->num_queue_pairs);
3233                adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3234                adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3235                iavf_schedule_reset(adapter);
3236                return -ENODEV;
3237        }
3238        adapter->num_req_queues = 0;
3239
3240        hw_enc_features = NETIF_F_SG                    |
3241                          NETIF_F_IP_CSUM               |
3242                          NETIF_F_IPV6_CSUM             |
3243                          NETIF_F_HIGHDMA               |
3244                          NETIF_F_SOFT_FEATURES |
3245                          NETIF_F_TSO                   |
3246                          NETIF_F_TSO_ECN               |
3247                          NETIF_F_TSO6                  |
3248                          NETIF_F_SCTP_CRC              |
3249                          NETIF_F_RXHASH                |
3250                          NETIF_F_RXCSUM                |
3251                          0;
3252
3253        /* advertise to stack only if offloads for encapsulated packets are
3254         * supported
3255         */
3256        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3257                hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
3258                                   NETIF_F_GSO_GRE              |
3259                                   NETIF_F_GSO_GRE_CSUM         |
3260                                   NETIF_F_GSO_IPXIP4           |
3261                                   NETIF_F_GSO_IPXIP6           |
3262                                   NETIF_F_GSO_UDP_TUNNEL_CSUM  |
3263                                   NETIF_F_GSO_PARTIAL          |
3264                                   0;
3265
3266                if (!(vfres->vf_cap_flags &
3267                      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3268                        netdev->gso_partial_features |=
3269                                NETIF_F_GSO_UDP_TUNNEL_CSUM;
3270
3271                netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3272                netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3273                netdev->hw_enc_features |= hw_enc_features;
3274        }
3275        /* record features VLANs can make use of */
3276        netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3277
3278        /* Write features and hw_features separately to avoid polluting
3279         * with, or dropping, features that are set when we registered.
3280         */
3281        hw_features = hw_enc_features;
3282
3283        /* Enable VLAN features if supported */
3284        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3285                hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3286                                NETIF_F_HW_VLAN_CTAG_RX);
3287        /* Enable cloud filter if ADQ is supported */
3288        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3289                hw_features |= NETIF_F_HW_TC;
3290
3291        netdev->hw_features |= hw_features;
3292
3293        netdev->features |= hw_features;
3294
3295        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3296                netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3297
3298        netdev->priv_flags |= IFF_UNICAST_FLT;
3299
3300        /* Do not turn on offloads when they are requested to be turned off.
3301         * TSO needs a minimum MTU of 576 bytes to work correctly.
3302         */
3303        if (netdev->wanted_features) {
3304                if (!(netdev->wanted_features & NETIF_F_TSO) ||
3305                    netdev->mtu < 576)
3306                        netdev->features &= ~NETIF_F_TSO;
3307                if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3308                    netdev->mtu < 576)
3309                        netdev->features &= ~NETIF_F_TSO6;
3310                if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3311                        netdev->features &= ~NETIF_F_TSO_ECN;
3312                if (!(netdev->wanted_features & NETIF_F_GRO))
3313                        netdev->features &= ~NETIF_F_GRO;
3314                if (!(netdev->wanted_features & NETIF_F_GSO))
3315                        netdev->features &= ~NETIF_F_GSO;
3316        }
3317
3318        adapter->vsi.id = adapter->vsi_res->vsi_id;
3319
3320        adapter->vsi.back = adapter;
3321        adapter->vsi.base_vector = 1;
3322        adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3323        vsi->netdev = adapter->netdev;
3324        vsi->qs_handle = adapter->vsi_res->qset_handle;
3325        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3326                adapter->rss_key_size = vfres->rss_key_size;
3327                adapter->rss_lut_size = vfres->rss_lut_size;
3328        } else {
3329                adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3330                adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3331        }
3332
3333        return 0;
3334}
3335
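/* Rough sketch of the init state machine that iavf_init_task() below
 * steps through (derived from its switch statement; between the first
 * three states the task reschedules itself rather than busy-waiting):
 *
 *   __IAVF_STARTUP            -- init admin queue, send API version
 *   __IAVF_INIT_VERSION_CHECK -- verify PF API version, request config
 *   __IAVF_INIT_GET_RESOURCES -- receive VF config, then continue with
 *                                netdev/interrupt/RSS setup as
 *                                __IAVF_INIT_SW, ending in __IAVF_DOWN
 *
 * Errors fall through to the err_* labels, which clean up and
 * reschedule the task for a later retry.
 */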
3336/**
3337 * iavf_init_task - worker thread to perform delayed initialization
3338 * @work: pointer to work_struct containing our data
3339 *
3340 * This task completes the work that was begun in probe. Due to the nature
3341 * of VF-PF communications, we may need to wait tens of milliseconds to get
3342 * responses back from the PF. Rather than busy-wait in probe and bog down the
3343 * whole system, we'll do it in a task so we can sleep.
3344 * This task only runs during driver init. Once we've established
3345 * communications with the PF driver and set up our netdev, the watchdog
3346 * takes over.
3347 **/
3348static void iavf_init_task(struct work_struct *work)
3349{
3350        struct iavf_adapter *adapter = container_of(work,
3351                                                      struct iavf_adapter,
3352                                                      init_task.work);
3353        struct net_device *netdev = adapter->netdev;
3354        struct iavf_hw *hw = &adapter->hw;
3355        struct pci_dev *pdev = adapter->pdev;
3356        int err, bufsz;
3357
3358        switch (adapter->state) {
3359        case __IAVF_STARTUP:
3360                /* driver loaded, probe complete */
3361                adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
3362                adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3363                err = iavf_set_mac_type(hw);
3364                if (err) {
3365                        dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
3366                                err);
3367                        goto err;
3368                }
3369                err = iavf_check_reset_complete(hw);
3370                if (err) {
3371                        dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
3372                                 err);
3373                        goto err;
3374                }
3375                hw->aq.num_arq_entries = IAVF_AQ_LEN;
3376                hw->aq.num_asq_entries = IAVF_AQ_LEN;
3377                hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
3378                hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
3379
3380                err = iavf_init_adminq(hw);
3381                if (err) {
3382                        dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
3383                                err);
3384                        goto err;
3385                }
3386                err = iavf_send_api_ver(adapter);
3387                if (err) {
3388                        dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
3389                        iavf_shutdown_adminq(hw);
3390                        goto err;
3391                }
3392                adapter->state = __IAVF_INIT_VERSION_CHECK;
3393                goto restart;
3394        case __IAVF_INIT_VERSION_CHECK:
3395                if (!iavf_asq_done(hw)) {
3396                        dev_err(&pdev->dev, "Admin queue command never completed\n");
3397                        iavf_shutdown_adminq(hw);
3398                        adapter->state = __IAVF_STARTUP;
3399                        goto err;
3400                }
3401
3402                /* aq msg sent, awaiting reply */
3403                err = iavf_verify_api_ver(adapter);
3404                if (err) {
3405                        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
3406                                err = iavf_send_api_ver(adapter);
3407                        else
3408                                dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
3409                                        adapter->pf_version.major,
3410                                        adapter->pf_version.minor,
3411                                        VIRTCHNL_VERSION_MAJOR,
3412                                        VIRTCHNL_VERSION_MINOR);
3413                        goto err;
3414                }
3415                err = iavf_send_vf_config_msg(adapter);
3416                if (err) {
3417                        dev_err(&pdev->dev, "Unable to send config request (%d)\n",
3418                                err);
3419                        goto err;
3420                }
3421                adapter->state = __IAVF_INIT_GET_RESOURCES;
3422                goto restart;
3423        case __IAVF_INIT_GET_RESOURCES:
3424                /* aq msg sent, awaiting reply */
3425                if (!adapter->vf_res) {
3426                        bufsz = sizeof(struct virtchnl_vf_resource) +
3427                                (IAVF_MAX_VF_VSI *
3428                                 sizeof(struct virtchnl_vsi_resource));
3429                        adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
3430                        if (!adapter->vf_res)
3431                                goto err;
3432                }
3433                err = iavf_get_vf_config(adapter);
3434                if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
3435                        err = iavf_send_vf_config_msg(adapter);
3436                        goto err;
3437                } else if (err == I40E_ERR_PARAM) {
3438                        /* We only get ERR_PARAM if the device is in a very bad
3439                         * state or if we've been disabled for previous bad
3440                         * behavior. Either way, we're done now.
3441                         */
3442                        iavf_shutdown_adminq(hw);
3443                        dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
3444                        return;
3445                }
3446                if (err) {
3447                        dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
3448                                err);
3449                        goto err_alloc;
3450                }
3451                adapter->state = __IAVF_INIT_SW;
3452                break;
3453        default:
3454                goto err_alloc;
3455        }
3456
3457        if (iavf_process_config(adapter))
3458                goto err_alloc;
3459        adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3460
3461        adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
3462
3463        netdev->netdev_ops = &iavf_netdev_ops;
3464        iavf_set_ethtool_ops(netdev);
3465        netdev->watchdog_timeo = 5 * HZ;
3466
3467        /* MTU range: 68 - 9710 */
3468        netdev->min_mtu = ETH_MIN_MTU;
3469        netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
3470
3471        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
3472                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
3473                         adapter->hw.mac.addr);
3474                eth_hw_addr_random(netdev);
3475                ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
3476        } else {
3477                adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
3478                ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
3479                ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
3480        }
3481
3482        timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
3483        mod_timer(&adapter->watchdog_timer, jiffies + 1);
3484
3485        adapter->tx_desc_count = IAVF_DEFAULT_TXD;
3486        adapter->rx_desc_count = IAVF_DEFAULT_RXD;
3487        err = iavf_init_interrupt_scheme(adapter);
3488        if (err)
3489                goto err_sw_init;
3490        iavf_map_rings_to_vectors(adapter);
3491        if (adapter->vf_res->vf_cap_flags &
3492            VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
3493                adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
3494
3495        err = iavf_request_misc_irq(adapter);
3496        if (err)
3497                goto err_sw_init;
3498
3499        netif_carrier_off(netdev);
3500        adapter->link_up = false;
3501
3502        if (!adapter->netdev_registered) {
3503                err = register_netdev(netdev);
3504                if (err)
3505                        goto err_register;
3506        }
3507
3508        adapter->netdev_registered = true;
3509
3510        netif_tx_stop_all_queues(netdev);
3511        if (CLIENT_ALLOWED(adapter)) {
3512                err = iavf_lan_add_device(adapter);
3513                if (err)
3514                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
3515                                 err);
3516        }
3517
3518        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
3519        if (netdev->features & NETIF_F_GRO)
3520                dev_info(&pdev->dev, "GRO is enabled\n");
3521
3522        adapter->state = __IAVF_DOWN;
3523        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3524        iavf_misc_irq_enable(adapter);
3525        wake_up(&adapter->down_waitqueue);
3526
3527        adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
3528        adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
3529        if (!adapter->rss_key || !adapter->rss_lut)
3530                goto err_mem;
3531
3532        if (RSS_AQ(adapter)) {
3533                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3534                mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
3535        } else {
3536                iavf_init_rss(adapter);
3537        }
3538        return;
3539restart:
3540        schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
3541        return;
3542err_mem:
3543        iavf_free_rss(adapter);
3544err_register:
3545        iavf_free_misc_irq(adapter);
3546err_sw_init:
3547        iavf_reset_interrupt_capability(adapter);
3548err_alloc:
3549        kfree(adapter->vf_res);
3550        adapter->vf_res = NULL;
3551err:
3552        /* Things went into the weeds, so try again later */
3553        if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
3554                dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
3555                adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3556                iavf_shutdown_adminq(hw);
3557                adapter->state = __IAVF_STARTUP;
3558                schedule_delayed_work(&adapter->init_task, HZ * 5);
3559                return;
3560        }
3561        schedule_delayed_work(&adapter->init_task, HZ);
3562}
3563
3564/**
3565 * iavf_shutdown - Shutdown the device in preparation for a reboot
3566 * @pdev: pci device structure
3567 **/
3568static void iavf_shutdown(struct pci_dev *pdev)
3569{
3570        struct net_device *netdev = pci_get_drvdata(pdev);
3571        struct iavf_adapter *adapter = netdev_priv(netdev);
3572
3573        netif_device_detach(netdev);
3574
3575        if (netif_running(netdev))
3576                iavf_close(netdev);
3577
3578        /* Prevent the watchdog from running. */
3579        adapter->state = __IAVF_REMOVE;
3580        adapter->aq_required = 0;
3581
3582#ifdef CONFIG_PM
3583        pci_save_state(pdev);
3584
3585#endif
3586        pci_disable_device(pdev);
3587}
3588
3589/**
3590 * iavf_probe - Device Initialization Routine
3591 * @pdev: PCI device information struct
3592 * @ent: entry in iavf_pci_tbl
3593 *
3594 * Returns 0 on success, negative on failure
3595 *
3596 * iavf_probe initializes an adapter identified by a pci_dev structure.
3597 * The OS initialization, configuring of the adapter private structure,
3598 * and a hardware reset occur.
3599 **/
3600static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3601{
3602        struct net_device *netdev;
3603        struct iavf_adapter *adapter = NULL;
3604        struct iavf_hw *hw = NULL;
3605        int err;
3606
3607        err = pci_enable_device(pdev);
3608        if (err)
3609                return err;
3610
3611        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3612        if (err) {
3613                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3614                if (err) {
3615                        dev_err(&pdev->dev,
3616                                "DMA configuration failed: 0x%x\n", err);
3617                        goto err_dma;
3618                }
3619        }
3620
3621        err = pci_request_regions(pdev, iavf_driver_name);
3622        if (err) {
3623                dev_err(&pdev->dev,
3624                        "pci_request_regions failed 0x%x\n", err);
3625                goto err_pci_reg;
3626        }
3627
3628        pci_enable_pcie_error_reporting(pdev);
3629
3630        pci_set_master(pdev);
3631
3632        netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3633                                   IAVF_MAX_REQ_QUEUES);
3634        if (!netdev) {
3635                err = -ENOMEM;
3636                goto err_alloc_etherdev;
3637        }
3638
3639        SET_NETDEV_DEV(netdev, &pdev->dev);
3640
3641        pci_set_drvdata(pdev, netdev);
3642        adapter = netdev_priv(netdev);
3643
3644        adapter->netdev = netdev;
3645        adapter->pdev = pdev;
3646
3647        hw = &adapter->hw;
3648        hw->back = adapter;
3649
3650        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3651        adapter->state = __IAVF_STARTUP;
3652
3653        /* Call save state here because it relies on the adapter struct. */
3654        pci_save_state(pdev);
3655
3656        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3657                              pci_resource_len(pdev, 0));
3658        if (!hw->hw_addr) {
3659                err = -EIO;
3660                goto err_ioremap;
3661        }
3662        hw->vendor_id = pdev->vendor;
3663        hw->device_id = pdev->device;
3664        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3665        hw->subsystem_vendor_id = pdev->subsystem_vendor;
3666        hw->subsystem_device_id = pdev->subsystem_device;
3667        hw->bus.device = PCI_SLOT(pdev->devfn);
3668        hw->bus.func = PCI_FUNC(pdev->devfn);
3669        hw->bus.bus_id = pdev->bus->number;
3670
3671        /* set up the locks for the AQ, do this only once in probe
3672         * and destroy them only once in remove
3673         */
3674        mutex_init(&hw->aq.asq_mutex);
3675        mutex_init(&hw->aq.arq_mutex);
3676
3677        spin_lock_init(&adapter->mac_vlan_list_lock);
3678        spin_lock_init(&adapter->cloud_filter_list_lock);
3679
3680        INIT_LIST_HEAD(&adapter->mac_filter_list);
3681        INIT_LIST_HEAD(&adapter->vlan_filter_list);
3682        INIT_LIST_HEAD(&adapter->cloud_filter_list);
3683
3684        INIT_WORK(&adapter->reset_task, iavf_reset_task);
3685        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3686        INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3687        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3688        INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
3689        schedule_delayed_work(&adapter->init_task,
3690                              msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
3691
3692        /* Setup the wait queue for indicating transition to down status */
3693        init_waitqueue_head(&adapter->down_waitqueue);
3694
3695        return 0;
3696
3697err_ioremap:
3698        free_netdev(netdev);
3699err_alloc_etherdev:
3700        pci_release_regions(pdev);
3701err_pci_reg:
3702err_dma:
3703        pci_disable_device(pdev);
3704        return err;
3705}
3706
3707#ifdef CONFIG_PM
3708/**
3709 * iavf_suspend - Power management suspend routine
3710 * @pdev: PCI device information struct
3711 * @state: unused
3712 *
3713 * Called when the system (VM) is entering sleep/suspend.
3714 **/
3715static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
3716{
3717        struct net_device *netdev = pci_get_drvdata(pdev);
3718        struct iavf_adapter *adapter = netdev_priv(netdev);
3719        int retval = 0;
3720
3721        netif_device_detach(netdev);
3722
3723        while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3724                                &adapter->crit_section))
3725                usleep_range(500, 1000);
3726
3727        if (netif_running(netdev)) {
3728                rtnl_lock();
3729                iavf_down(adapter);
3730                rtnl_unlock();
3731        }
3732        iavf_free_misc_irq(adapter);
3733        iavf_reset_interrupt_capability(adapter);
3734
3735        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3736
3737        retval = pci_save_state(pdev);
3738        if (retval)
3739                return retval;
3740
3741        pci_disable_device(pdev);
3742
3743        return 0;
3744}
3745
3746/**
3747 * iavf_resume - Power management resume routine
3748 * @pdev: PCI device information struct
3749 *
3750 * Called when the system (VM) is resumed from sleep/suspend.
3751 **/
3752static int iavf_resume(struct pci_dev *pdev)
3753{
3754        struct net_device *netdev = pci_get_drvdata(pdev);
3755        struct iavf_adapter *adapter = netdev_priv(netdev);
3756        int err;
3757
3758        pci_set_power_state(pdev, PCI_D0);
3759        pci_restore_state(pdev);
3760        /* pci_restore_state clears dev->state_saved so call
3761         * pci_save_state to restore it.
3762         */
3763        pci_save_state(pdev);
3764
3765        err = pci_enable_device_mem(pdev);
3766        if (err) {
3767                dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3768                return err;
3769        }
3770        pci_set_master(pdev);
3771
3772        rtnl_lock();
3773        err = iavf_set_interrupt_capability(adapter);
3774        if (err) {
3775                rtnl_unlock();
3776                dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3777                return err;
3778        }
3779        err = iavf_request_misc_irq(adapter);
3780        rtnl_unlock();
3781        if (err) {
3782                dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3783                return err;
3784        }
3785
3786        schedule_work(&adapter->reset_task);
3787
3788        netif_device_attach(netdev);
3789
3790        return err;
3791}
3792
3793#endif /* CONFIG_PM */
3794/**
3795 * iavf_remove - Device Removal Routine
3796 * @pdev: PCI device information struct
3797 *
3798 * iavf_remove is called by the PCI subsystem to alert the driver
3799 * that it should release a PCI device.  This could be caused by a
3800 * Hot-Plug event, or because the driver is going to be removed from
3801 * memory.
3802 **/
3803static void iavf_remove(struct pci_dev *pdev)
3804{
3805        struct net_device *netdev = pci_get_drvdata(pdev);
3806        struct iavf_adapter *adapter = netdev_priv(netdev);
3807        struct iavf_vlan_filter *vlf, *vlftmp;
3808        struct iavf_mac_filter *f, *ftmp;
3809        struct iavf_cloud_filter *cf, *cftmp;
3810        struct iavf_hw *hw = &adapter->hw;
3811        int err;
3812        /* Indicate we are in remove and not to run reset_task */
3813        set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
3814        cancel_delayed_work_sync(&adapter->init_task);
3815        cancel_work_sync(&adapter->reset_task);
3816        cancel_delayed_work_sync(&adapter->client_task);
3817        if (adapter->netdev_registered) {
3818                unregister_netdev(netdev);
3819                adapter->netdev_registered = false;
3820        }
3821        if (CLIENT_ALLOWED(adapter)) {
3822                err = iavf_lan_del_device(adapter);
3823                if (err)
3824                        dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3825                                 err);
3826        }
3827
3828        /* Shut down all the garbage mashers on the detention level */
3829        adapter->state = __IAVF_REMOVE;
3830        adapter->aq_required = 0;
3831        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3832        iavf_request_reset(adapter);
3833        msleep(50);
3834        /* If the FW isn't responding, kick it once, but only once. */
3835        if (!iavf_asq_done(hw)) {
3836                iavf_request_reset(adapter);
3837                msleep(50);
3838        }
3839        iavf_free_all_tx_resources(adapter);
3840        iavf_free_all_rx_resources(adapter);
3841        iavf_misc_irq_disable(adapter);
3842        iavf_free_misc_irq(adapter);
3843        iavf_reset_interrupt_capability(adapter);
3844        iavf_free_q_vectors(adapter);
3845
3846        if (adapter->watchdog_timer.function)
3847                del_timer_sync(&adapter->watchdog_timer);
3848
3849        cancel_work_sync(&adapter->adminq_task);
3850
3851        iavf_free_rss(adapter);
3852
3853        if (hw->aq.asq.count)
3854                iavf_shutdown_adminq(hw);
3855
3856        /* destroy the locks only once, here */
3857        mutex_destroy(&hw->aq.arq_mutex);
3858        mutex_destroy(&hw->aq.asq_mutex);
3859
3860        iounmap(hw->hw_addr);
3861        pci_release_regions(pdev);
3862        iavf_free_all_tx_resources(adapter);
3863        iavf_free_all_rx_resources(adapter);
3864        iavf_free_queues(adapter);
3865        kfree(adapter->vf_res);
3866        spin_lock_bh(&adapter->mac_vlan_list_lock);
3867        /* If we got removed before an up/down sequence, we've got a filter
3868         * hanging out there that we need to get rid of.
3869         */
3870        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3871                list_del(&f->list);
3872                kfree(f);
3873        }
3874        list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3875                                 list) {
3876                list_del(&vlf->list);
3877                kfree(vlf);
3878        }
3879
3880        spin_unlock_bh(&adapter->mac_vlan_list_lock);
3881
3882        spin_lock_bh(&adapter->cloud_filter_list_lock);
3883        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3884                list_del(&cf->list);
3885                kfree(cf);
3886        }
3887        spin_unlock_bh(&adapter->cloud_filter_list_lock);
3888
3889        free_netdev(netdev);
3890
3891        pci_disable_pcie_error_reporting(pdev);
3892
3893        pci_disable_device(pdev);
3894}
3895
3896static struct pci_driver iavf_driver = {
3897        .name     = iavf_driver_name,
3898        .id_table = iavf_pci_tbl,
3899        .probe    = iavf_probe,
3900        .remove   = iavf_remove,
3901#ifdef CONFIG_PM
3902        .suspend  = iavf_suspend,
3903        .resume   = iavf_resume,
3904#endif
3905        .shutdown = iavf_shutdown,
3906};
3907
3908/**
3909 * iavf_init_module - Driver Registration Routine
3910 *
3911 * iavf_init_module is the first routine called when the driver is
3912 * loaded. All it does is register with the PCI subsystem.
3913 **/
3914static int __init iavf_init_module(void)
3915{
3916        int ret;
3917
3918        pr_info("iavf: %s - version %s\n", iavf_driver_string,
3919                iavf_driver_version);
3920
3921        pr_info("%s\n", iavf_copyright);
3922
3923        iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3924                                  iavf_driver_name);
3925        if (!iavf_wq) {
3926                pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
3927                return -ENOMEM;
3928        }
3929        ret = pci_register_driver(&iavf_driver);
3930        return ret;
3931}
3932
3933module_init(iavf_init_module);
3934
3935/**
3936 * iavf_exit_module - Driver Exit Cleanup Routine
3937 *
3938 * iavf_exit_module is called just before the driver is removed
3939 * from memory.
3940 **/
3941static void __exit iavf_exit_module(void)
3942{
3943        pci_unregister_driver(&iavf_driver);
3944        destroy_workqueue(iavf_wq);
3945}
3946
3947module_exit(iavf_exit_module);
3948
3949/* iavf_main.c */
3950