linux/drivers/net/ethernet/intel/iavf/iavf_main.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

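/**
 * iavf_status_to_errno - convert iavf status codes to standard errnos
 * @status: iavf status code returned by the shared admin queue code
 *
 * Returns 0 for IAVF_SUCCESS and a negative errno otherwise. Status codes
 * without a more specific mapping fall through to -EIO.
 **/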
int iavf_status_to_errno(enum iavf_status status)
{
        switch (status) {
        case IAVF_SUCCESS:
                return 0;
        case IAVF_ERR_PARAM:
        case IAVF_ERR_MAC_TYPE:
        case IAVF_ERR_INVALID_MAC_ADDR:
        case IAVF_ERR_INVALID_LINK_SETTINGS:
        case IAVF_ERR_INVALID_PD_ID:
        case IAVF_ERR_INVALID_QP_ID:
        case IAVF_ERR_INVALID_CQ_ID:
        case IAVF_ERR_INVALID_CEQ_ID:
        case IAVF_ERR_INVALID_AEQ_ID:
        case IAVF_ERR_INVALID_SIZE:
        case IAVF_ERR_INVALID_ARP_INDEX:
        case IAVF_ERR_INVALID_FPM_FUNC_ID:
        case IAVF_ERR_QP_INVALID_MSG_SIZE:
        case IAVF_ERR_INVALID_FRAG_COUNT:
        case IAVF_ERR_INVALID_ALIGNMENT:
        case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
        case IAVF_ERR_INVALID_IMM_DATA_SIZE:
        case IAVF_ERR_INVALID_VF_ID:
        case IAVF_ERR_INVALID_HMCFN_ID:
        case IAVF_ERR_INVALID_PBLE_INDEX:
        case IAVF_ERR_INVALID_SD_INDEX:
        case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
        case IAVF_ERR_INVALID_SD_TYPE:
        case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
        case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
        case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
                return -EINVAL;
        case IAVF_ERR_NVM:
        case IAVF_ERR_NVM_CHECKSUM:
        case IAVF_ERR_PHY:
        case IAVF_ERR_CONFIG:
        case IAVF_ERR_UNKNOWN_PHY:
        case IAVF_ERR_LINK_SETUP:
        case IAVF_ERR_ADAPTER_STOPPED:
        case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
        case IAVF_ERR_AUTONEG_NOT_COMPLETE:
        case IAVF_ERR_RESET_FAILED:
        case IAVF_ERR_BAD_PTR:
        case IAVF_ERR_SWFW_SYNC:
        case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
        case IAVF_ERR_QUEUE_EMPTY:
        case IAVF_ERR_FLUSHED_QUEUE:
        case IAVF_ERR_OPCODE_MISMATCH:
        case IAVF_ERR_CQP_COMPL_ERROR:
        case IAVF_ERR_BACKING_PAGE_ERROR:
        case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
        case IAVF_ERR_MEMCPY_FAILED:
        case IAVF_ERR_SRQ_ENABLED:
        case IAVF_ERR_ADMIN_QUEUE_ERROR:
        case IAVF_ERR_ADMIN_QUEUE_FULL:
        case IAVF_ERR_BAD_IWARP_CQE:
        case IAVF_ERR_NVM_BLANK_MODE:
        case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
        case IAVF_ERR_DIAG_TEST_FAILED:
        case IAVF_ERR_FIRMWARE_API_VERSION:
        case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
                return -EIO;
        case IAVF_ERR_DEVICE_NOT_SUPPORTED:
                return -ENODEV;
        case IAVF_ERR_NO_AVAILABLE_VSI:
        case IAVF_ERR_RING_FULL:
                return -ENOSPC;
        case IAVF_ERR_NO_MEMORY:
                return -ENOMEM;
        case IAVF_ERR_TIMEOUT:
        case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
                return -ETIMEDOUT;
        case IAVF_ERR_NOT_IMPLEMENTED:
        case IAVF_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
                return -EALREADY;
        case IAVF_ERR_NOT_READY:
                return -EBUSY;
        case IAVF_ERR_BUF_TOO_SHORT:
                return -EMSGSIZE;
        }

        return -EIO;
}

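/**
 * virtchnl_status_to_errno - convert virtchnl status codes to standard errnos
 * @v_status: status code returned by the PF over the virtchnl channel
 *
 * Returns 0 for VIRTCHNL_STATUS_SUCCESS and a negative errno otherwise.
 * Unhandled status codes fall through to -EIO.
 **/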
int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
        switch (v_status) {
        case VIRTCHNL_STATUS_SUCCESS:
                return 0;
        case VIRTCHNL_STATUS_ERR_PARAM:
        case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
                return -EINVAL;
        case VIRTCHNL_STATUS_ERR_NO_MEMORY:
                return -ENOMEM;
        case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
        case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
        case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
                return -EIO;
        case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
                return -EOPNOTSUPP;
        }

        return -EIO;
}

/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
        return netdev_priv(pci_get_drvdata(pdev));
}

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
                                         struct iavf_dma_mem *mem,
                                         u64 size, u32 alignment)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
                                     struct iavf_dma_mem *mem)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem || !mem->va)
                return IAVF_ERR_PARAM;
        dma_free_coherent(&adapter->pdev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
                                          struct iavf_virt_mem *mem, u32 size)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
                                      struct iavf_virt_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);

        return 0;
}

/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
        unsigned int wait, delay = 10;

        for (wait = 0; wait < msecs; wait += delay) {
                if (mutex_trylock(lock))
                        return 0;

                msleep(delay);
        }

        return -1;
}
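
/* Illustrative use only (the lock name and timeout value here are examples,
 * not taken from this excerpt):
 *
 *        if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *                dev_warn(&adapter->pdev->dev, "failed to acquire lock\n");
 */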

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
                queue_work(iavf_wq, &adapter->reset_task);
        }
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
        adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        adapter->tx_timeout_count++;
        iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

        iavf_flush(hw);

        synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
        int i;
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
        }
        iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
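 *
 * Queue bit n of @mask corresponds to MSI-X vector n + 1, since vector 0 is
 * reserved for the admin queue and is enabled by iavf_misc_irq_enable().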
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & BIT(i - 1)) {
                        wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
                             IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                             IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
                }
        }
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
        struct iavf_hw *hw = &adapter->hw;

        iavf_misc_irq_enable(adapter);
        iavf_irq_enable_queues(adapter, ~0);

        if (flush)
                iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
        struct net_device *netdev = data;
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;

        /* handle non-queue interrupts, these reads clear the registers */
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);

        if (adapter->state != __IAVF_REMOVE)
                /* schedule work on the private workqueue */
                queue_work(iavf_wq, &adapter->adminq_task);

        return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
        struct iavf_q_vector *q_vector = data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

        napi_schedule_irqoff(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
        struct iavf_hw *hw = &adapter->hw;

        rx_ring->q_vector = q_vector;
        rx_ring->next = q_vector->rx.ring;
        rx_ring->vsi = &adapter->vsi;
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
             q_vector->rx.current_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
        struct iavf_hw *hw = &adapter->hw;

        tx_ring->q_vector = q_vector;
        tx_ring->next = q_vector->tx.ring;
        tx_ring->vsi = &adapter->vsi;
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
             q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
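 *
 * For example, with six active queues and four queue vectors, ring pairs
 * 0-5 land on vectors 0, 1, 2, 3, 0 and 1 respectively.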
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
        int rings_remaining = adapter->num_active_queues;
        int ridx = 0, vidx = 0;
        int q_vectors;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (; ridx < rings_remaining; ridx++) {
                iavf_map_vector_to_rxq(adapter, vidx, ridx);
                iavf_map_vector_to_txq(adapter, vidx, ridx);

                /* In the case where we have more queues than vectors, continue
                 * round-robin on vectors until all queues are mapped.
                 */
                if (++vidx >= q_vectors)
                        vidx = 0;
        }

        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        struct iavf_q_vector *q_vector =
                container_of(notify, struct iavf_q_vector, affinity_notify);

        cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
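 *
 * Each vector is named after the direction(s) it services, e.g.
 * "iavf-<basename>-TxRx-<n>"; these are the names that show up in
 * /proc/interrupts.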
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for the non-queue (admin queue) vector */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%u", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%u", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0,
                                  q_vector->name,
                                  q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release =
                                                   iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_update_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_update_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
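 *
 * Also picks the Rx buffer length: 2048 bytes by default (and always for
 * legacy Rx), raised to 3072 on 4K-page systems so jumbo frames make better
 * use of an order-1 page, or trimmed to 1536 minus NET_IP_ALIGN when the
 * MTU fits a standard Ethernet frame.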
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu.  On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
                                 struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (f->vlan.vid == vlan.vid &&
                    f->vlan.tpid == vlan.tpid)
                        return f;
        }

        return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
                                struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto clearout;

                f->vlan = vlan;

                list_add_tail(&f->list, &adapter->vlan_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters - restore previously configured non-MAC filters
 * @adapter: board private structure
 *
 * Restore existing non-MAC (i.e. VLAN) filters when the VF netdev comes
 * back up.
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
        u16 vid;

        /* re-add all VLAN filters */
        for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));

        for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
                iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
}

/**
 * iavf_get_num_vlans_added - get number of VLANs added
 * @adapter: board private structure
 */
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
        return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
                bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
}

/**
 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
 * @adapter: board private structure
 *
 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
 * do not impose a limit as that maintains current behavior and for
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
 **/
static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
{
        /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
         * never been a limit on the VF driver side
         */
        if (VLAN_ALLOWED(adapter))
                return VLAN_N_VID;
        else if (VLAN_V2_ALLOWED(adapter))
                return adapter->vlan_v2_caps.filtering.max_filters;

        return 0;
}

/**
 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
 * @adapter: board private structure
 **/
static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
{
        if (iavf_get_num_vlans_added(adapter) <
            iavf_get_max_vlans_allowed(adapter))
                return false;

        return true;
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (!VLAN_FILTERING_ALLOWED(adapter))
                return -EIO;

        if (iavf_max_vlans_added(adapter)) {
                netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
                           iavf_get_max_vlans_allowed(adapter));
                return -EIO;
        }

        if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
                return -ENOMEM;

        return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
        if (proto == cpu_to_be16(ETH_P_8021Q))
                clear_bit(vid, adapter->vsi.active_cvlans);
        else
                clear_bit(vid, adapter->vsi.active_svlans);

        return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
                                  const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (ether_addr_equal(macaddr, f->macaddr))
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                        const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        f = iavf_find_filter(adapter, macaddr);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return f;

                ether_addr_copy(f->macaddr, macaddr);

                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                f->is_new_mac = true;
                f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
        }

        return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_filter(adapter, hw->mac.addr);
        if (f) {
                f->remove = true;
                f->is_primary = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }

        f = iavf_add_filter(adapter, addr->sa_data);
        if (f) {
                f->is_primary = true;
                ether_addr_copy(hw->mac.addr, addr->sa_data);
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* schedule the watchdog task to immediately process the request */
        if (f)
                queue_work(iavf_wq, &adapter->watchdog_task.work);

        return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (iavf_add_filter(adapter, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = iavf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
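 *
 * This callback runs in atomic context, so nothing is sent to the PF here:
 * the address lists are synced under the spinlock, and promiscuous/allmulti
 * transitions are recorded in aq_required for the watchdog task to act on.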
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (netdev->flags & IFF_PROMISC &&
            !(adapter->flags & IAVF_FLAG_PROMISC_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
        else if (!(netdev->flags & IFF_PROMISC) &&
                 adapter->flags & IAVF_FLAG_PROMISC_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

        if (netdev->flags & IFF_ALLMULTI &&
            !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
        else if (!(netdev->flags & IFF_ALLMULTI) &&
                 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;

                q_vector = &adapter->q_vectors[q_idx];
                napi = &q_vector->napi;
                napi_enable(napi);
        }
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                napi_disable(&q_vector->napi);
        }
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        iavf_set_rx_mode(netdev);

        iavf_configure_tx(adapter);
        iavf_configure_rx(adapter);
        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *ring = &adapter->rx_rings[i];

                iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
        }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
        iavf_change_state(adapter, __IAVF_RUNNING);
        clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_napi_enable_all(adapter);

        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_vlan_filter *vlf;
        struct iavf_cloud_filter *cf;
        struct iavf_fdir_fltr *fdir;
        struct iavf_mac_filter *f;
        struct iavf_adv_rss *rss;

        if (adapter->state <= __IAVF_DOWN_PENDING)
                return;

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        adapter->link_up = false;
        iavf_napi_disable_all(adapter);
        iavf_irq_disable(adapter);

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* clear the sync flag on all filters */
        __dev_uc_unsync(adapter->netdev, NULL);
        __dev_mc_unsync(adapter->netdev, NULL);

        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }

        /* remove all VLAN filters */
        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
                vlf->remove = true;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* remove all cloud filters */
        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
                cf->del = true;
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        /* remove all Flow Director filters */
        spin_lock_bh(&adapter->fdir_fltr_lock);
        list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
                fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
        }
        spin_unlock_bh(&adapter->fdir_fltr_lock);

        /* remove all advance RSS configuration */
        spin_lock_bh(&adapter->adv_rss_lock);
        list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
                rss->state = IAVF_ADV_RSS_DEL_REQUEST;
        spin_unlock_bh(&adapter->adv_rss_lock);

        if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
                 * here for this to complete. The watchdog is still running
                 * and it will take care of this.
                 */
                adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }

        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
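 * pci_enable_msix_range() may grant fewer than @vectors (but no fewer than
 * vector_threshold, MIN_MSIX_COUNT); adapter->num_msix_vectors records the
 * number actually allocated.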
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 0) Other (Admin Queue and link, mostly)
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
        if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return err;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NONQ_VECS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = err;
        return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
        if (!adapter->vsi_res)
                return;
        adapter->num_active_queues = 0;
        kfree(adapter->tx_rings);
        adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
        adapter->rx_rings = NULL;
}

/**
 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
 * @adapter: board private structure
 *
 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
 * stripped in certain descriptor fields. Instead of checking the offload
 * capability bits in the hot path, cache the location in the ring-specific
 * flags.
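 *
 * Roughly, L2TAG1 lives in the main Tx/Rx data descriptors, while the L2TAG2
 * locations use the Tx context descriptor and the second Rx descriptor word,
 * so these flags encode both whether and where a tag is offloaded.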
 */
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *tx_ring = &adapter->tx_rings[i];
                struct iavf_ring *rx_ring = &adapter->rx_rings[i];

                /* prevent multiple L2TAG bits being set after VFR */
                tx_ring->flags &=
                        ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
                          IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
                rx_ring->flags &=
                        ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
                          IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);

                if (VLAN_ALLOWED(adapter)) {
                        tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                        rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                } else if (VLAN_V2_ALLOWED(adapter)) {
                        struct virtchnl_vlan_supported_caps *stripping_support;
                        struct virtchnl_vlan_supported_caps *insertion_support;

                        stripping_support =
                                &adapter->vlan_v2_caps.offloads.stripping_support;
                        insertion_support =
                                &adapter->vlan_v2_caps.offloads.insertion_support;

                        if (stripping_support->outer) {
                                if (stripping_support->outer &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        rx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (stripping_support->outer &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
                                        rx_ring->flags |=
                                                IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
                        } else if (stripping_support->inner) {
                                if (stripping_support->inner &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        rx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (stripping_support->inner &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
                                        rx_ring->flags |=
                                                IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
                        }

                        if (insertion_support->outer) {
                                if (insertion_support->outer &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        tx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (insertion_support->outer &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
                                        tx_ring->flags |=
                                                IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
                        } else if (insertion_support->inner) {
                                if (insertion_support->inner &
                                    VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
                                        tx_ring->flags |=
                                                IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
                                else if (insertion_support->inner &
                                         VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
                                        tx_ring->flags |=
                                                IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
                        }
                }
        }
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time, since the number of queues
 * is not known at compile-time.
1413 **/
1414static int iavf_alloc_queues(struct iavf_adapter *adapter)
1415{
1416        int i, num_active_queues;
1417
1418        /* If we're in reset reallocating queues we don't actually know yet for
1419         * certain the PF gave us the number of queues we asked for but we'll
1420         * assume it did.  Once basic reset is finished we'll confirm once we
1421         * start negotiating config with PF.
1422         */
1423        if (adapter->num_req_queues)
1424                num_active_queues = adapter->num_req_queues;
1425        else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1426                 adapter->num_tc)
1427                num_active_queues = adapter->ch_config.total_qps;
1428        else
1429                num_active_queues = min_t(int,
1430                                          adapter->vsi_res->num_queue_pairs,
1431                                          (int)(num_online_cpus()));
1432
1433
1434        adapter->tx_rings = kcalloc(num_active_queues,
1435                                    sizeof(struct iavf_ring), GFP_KERNEL);
1436        if (!adapter->tx_rings)
1437                goto err_out;
1438        adapter->rx_rings = kcalloc(num_active_queues,
1439                                    sizeof(struct iavf_ring), GFP_KERNEL);
1440        if (!adapter->rx_rings)
1441                goto err_out;
1442
1443        for (i = 0; i < num_active_queues; i++) {
1444                struct iavf_ring *tx_ring;
1445                struct iavf_ring *rx_ring;
1446
1447                tx_ring = &adapter->tx_rings[i];
1448
1449                tx_ring->queue_index = i;
1450                tx_ring->netdev = adapter->netdev;
1451                tx_ring->dev = &adapter->pdev->dev;
1452                tx_ring->count = adapter->tx_desc_count;
1453                tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1454                if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1455                        tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1456
1457                rx_ring = &adapter->rx_rings[i];
1458                rx_ring->queue_index = i;
1459                rx_ring->netdev = adapter->netdev;
1460                rx_ring->dev = &adapter->pdev->dev;
1461                rx_ring->count = adapter->rx_desc_count;
1462                rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1463        }
1464
1465        adapter->num_active_queues = num_active_queues;
1466
1467        iavf_set_queue_vlan_tag_loc(adapter);
1468
1469        return 0;
1470
1471err_out:
1472        iavf_free_queues(adapter);
1473        return -ENOMEM;
1474}
1475
1476/**
1477 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1478 * @adapter: board private structure to initialize
1479 *
1480 * Attempt to configure the interrupts using the best available
1481 * capabilities of the hardware and the kernel.
1482 **/
1483static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1484{
1485        int vector, v_budget;
1486        int pairs = 0;
1487        int err = 0;
1488
1489        if (!adapter->vsi_res) {
1490                err = -EIO;
1491                goto out;
1492        }
1493        pairs = adapter->num_active_queues;
1494
1495        /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1496         * us much good if we have more vectors than CPUs. However, we already
1497         * limit the total number of queues by the number of CPUs so we do not
1498         * need any further limiting here.
1499         */
1500        v_budget = min_t(int, pairs + NONQ_VECS,
1501                         (int)adapter->vf_res->max_vectors);
1502
1503        adapter->msix_entries = kcalloc(v_budget,
1504                                        sizeof(struct msix_entry), GFP_KERNEL);
1505        if (!adapter->msix_entries) {
1506                err = -ENOMEM;
1507                goto out;
1508        }
1509
1510        for (vector = 0; vector < v_budget; vector++)
1511                adapter->msix_entries[vector].entry = vector;
1512
1513        err = iavf_acquire_msix_vectors(adapter, v_budget);
1514
1515out:
1516        netif_set_real_num_rx_queues(adapter->netdev, pairs);
1517        netif_set_real_num_tx_queues(adapter->netdev, pairs);
1518        return err;
1519}
1520
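/* Editor's sketch (hypothetical helper, not part of the driver): the vector
 * budget above is simply min(queue pairs + NONQ_VECS, PF-granted maximum).
 * With, say, 8 queue pairs, NONQ_VECS == 1 and vf_res->max_vectors == 5:
 */
static inline int iavf_example_vector_budget(int pairs, int nonq_vecs,
					     int max_vectors)
{
	/* returns 5 for (8, 1, 5): the PF cap wins, so queue interrupts
	 * end up sharing fewer MSI-X vectors than there are queue pairs
	 */
	return min(pairs + nonq_vecs, max_vectors);
}
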
1521/**
1522 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1523 * @adapter: board private structure
1524 *
1525 * Return 0 on success, negative on failure
1526 **/
1527static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1528{
1529        struct iavf_aqc_get_set_rss_key_data *rss_key =
1530                (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1531        struct iavf_hw *hw = &adapter->hw;
1532        enum iavf_status status;
1533
1534        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1535                /* bail because we already have a command pending */
1536                dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1537                        adapter->current_op);
1538                return -EBUSY;
1539        }
1540
1541        status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1542        if (status) {
1543                dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1544                        iavf_stat_str(hw, status),
1545                        iavf_aq_str(hw, hw->aq.asq_last_status));
1546                return iavf_status_to_errno(status);
1548        }
1549
1550        status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1551                                     adapter->rss_lut, adapter->rss_lut_size);
1552        if (status) {
1553                dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1554                        iavf_stat_str(hw, status),
1555                        iavf_aq_str(hw, hw->aq.asq_last_status));
1556                return iavf_status_to_errno(status);
1557        }
1558
1559        return 0;
1561}
1562
1563/**
1564 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1565 * @adapter: board private structure
1566 *
1567 * Returns 0 on success, negative on failure
1568 **/
1569static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1570{
1571        struct iavf_hw *hw = &adapter->hw;
1572        u32 *dw;
1573        u16 i;
1574
1575        dw = (u32 *)adapter->rss_key;
1576        for (i = 0; i <= adapter->rss_key_size / 4; i++)
1577                wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1578
1579        dw = (u32 *)adapter->rss_lut;
1580        for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1581                wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1582
1583        iavf_flush(hw);
1584
1585        return 0;
1586}
1587
1588/**
1589 * iavf_config_rss - Configure RSS keys and lut
1590 * @adapter: board private structure
1591 *
1592 * Returns 0 on success, negative on failure
1593 **/
1594int iavf_config_rss(struct iavf_adapter *adapter)
1595{
1597        if (RSS_PF(adapter)) {
1598                adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1599                                        IAVF_FLAG_AQ_SET_RSS_KEY;
1600                return 0;
1601        } else if (RSS_AQ(adapter)) {
1602                return iavf_config_rss_aq(adapter);
1603        } else {
1604                return iavf_config_rss_reg(adapter);
1605        }
1606}
1607
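/* Editor's note: three RSS programming paths exist, chosen by the negotiated
 * capability. RSS_PF defers both key and LUT to the PF by setting aq_required
 * flags, RSS_AQ programs them through the admin queue (iavf_config_rss_aq()
 * above), and the fallback writes the VF's own HKEY/HLUT registers directly
 * (iavf_config_rss_reg() above).
 */
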
1608/**
1609 * iavf_fill_rss_lut - Fill the lut with default values
1610 * @adapter: board private structure
1611 **/
1612static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1613{
1614        u16 i;
1615
1616        for (i = 0; i < adapter->rss_lut_size; i++)
1617                adapter->rss_lut[i] = i % adapter->num_active_queues;
1618}
1619
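/* Editor's sketch (not part of the driver): with rss_lut_size = 8 and
 * num_active_queues = 3, the loop above produces {0, 1, 2, 0, 1, 2, 0, 1},
 * spreading hash buckets round-robin across the active queues. A minimal
 * standalone model of the same fill, assuming a caller-provided buffer:
 */
static inline void iavf_example_fill_lut(u8 *lut, u16 lut_size, u16 num_queues)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_queues;	/* bucket i -> queue (i mod n) */
}
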
1620/**
1621 * iavf_init_rss - Prepare for RSS
1622 * @adapter: board private structure
1623 *
1624 * Return 0 on success, negative on failure
1625 **/
1626static int iavf_init_rss(struct iavf_adapter *adapter)
1627{
1628        struct iavf_hw *hw = &adapter->hw;
1629
1630        if (!RSS_PF(adapter)) {
1631                /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1632                if (adapter->vf_res->vf_cap_flags &
1633                    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1634                        adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1635                else
1636                        adapter->hena = IAVF_DEFAULT_RSS_HENA;
1637
1638                wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1639                wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1640        }
1641
1642        iavf_fill_rss_lut(adapter);
1643        netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1644
1645        return iavf_config_rss(adapter);
1646}
1647
1648/**
1649 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1650 * @adapter: board private structure to initialize
1651 *
1652 * We allocate one q_vector per queue interrupt.  If allocation fails we
1653 * return -ENOMEM.
1654 **/
1655static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1656{
1657        int q_idx = 0, num_q_vectors;
1658        struct iavf_q_vector *q_vector;
1659
1660        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1661        adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1662                                     GFP_KERNEL);
1663        if (!adapter->q_vectors)
1664                return -ENOMEM;
1665
1666        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1667                q_vector = &adapter->q_vectors[q_idx];
1668                q_vector->adapter = adapter;
1669                q_vector->vsi = &adapter->vsi;
1670                q_vector->v_idx = q_idx;
1671                q_vector->reg_idx = q_idx;
1672                cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1673                netif_napi_add(adapter->netdev, &q_vector->napi,
1674                               iavf_napi_poll, NAPI_POLL_WEIGHT);
1675        }
1676
1677        return 0;
1678}
1679
1680/**
1681 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1682 * @adapter: board private structure to initialize
1683 *
1684 * This function frees the memory allocated to the q_vectors.  In addition if
1685 * NAPI is enabled it will delete any references to the NAPI struct prior
1686 * to freeing the q_vector.
1687 **/
1688static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1689{
1690        int q_idx, num_q_vectors;
1691        int napi_vectors;
1692
1693        if (!adapter->q_vectors)
1694                return;
1695
1696        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1697        napi_vectors = adapter->num_active_queues;
1698
1699        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1700                struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1701
1702                if (q_idx < napi_vectors)
1703                        netif_napi_del(&q_vector->napi);
1704        }
1705        kfree(adapter->q_vectors);
1706        adapter->q_vectors = NULL;
1707}
1708
1709/**
1710 * iavf_reset_interrupt_capability - Reset MSIX setup
1711 * @adapter: board private structure
1712 *
1713 **/
1714void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1715{
1716        if (!adapter->msix_entries)
1717                return;
1718
1719        pci_disable_msix(adapter->pdev);
1720        kfree(adapter->msix_entries);
1721        adapter->msix_entries = NULL;
1722}
1723
1724/**
1725 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1726 * @adapter: board private structure to initialize
1727 *
1728 **/
1729int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1730{
1731        int err;
1732
1733        err = iavf_alloc_queues(adapter);
1734        if (err) {
1735                dev_err(&adapter->pdev->dev,
1736                        "Unable to allocate memory for queues\n");
1737                goto err_alloc_queues;
1738        }
1739
1740        rtnl_lock();
1741        err = iavf_set_interrupt_capability(adapter);
1742        rtnl_unlock();
1743        if (err) {
1744                dev_err(&adapter->pdev->dev,
1745                        "Unable to setup interrupt capabilities\n");
1746                goto err_set_interrupt;
1747        }
1748
1749        err = iavf_alloc_q_vectors(adapter);
1750        if (err) {
1751                dev_err(&adapter->pdev->dev,
1752                        "Unable to allocate memory for queue vectors\n");
1753                goto err_alloc_q_vectors;
1754        }
1755
1756        /* If we've made it this far with the ADq flag set, then we haven't
1757         * bailed out anywhere along the way, and the ADq resources were
1758         * actually allocated in the reset path.
1759         * Now we can truly claim that ADq is enabled.
1760         */
1761        if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1762            adapter->num_tc)
1763                dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created\n",
1764                         adapter->num_tc);
1765
1766        dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
1767                 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1768                 adapter->num_active_queues);
1769
1770        return 0;
1771err_alloc_q_vectors:
1772        iavf_reset_interrupt_capability(adapter);
1773err_set_interrupt:
1774        iavf_free_queues(adapter);
1775err_alloc_queues:
1776        return err;
1777}
1778
1779/**
1780 * iavf_free_rss - Free memory used by RSS structs
1781 * @adapter: board private structure
1782 **/
1783static void iavf_free_rss(struct iavf_adapter *adapter)
1784{
1785        kfree(adapter->rss_key);
1786        adapter->rss_key = NULL;
1787
1788        kfree(adapter->rss_lut);
1789        adapter->rss_lut = NULL;
1790}
1791
1792/**
1793 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1794 * @adapter: board private structure
1795 *
1796 * Returns 0 on success, negative on failure
1797 **/
1798static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1799{
1800        struct net_device *netdev = adapter->netdev;
1801        int err;
1802
1803        if (netif_running(netdev))
1804                iavf_free_traffic_irqs(adapter);
1805        iavf_free_misc_irq(adapter);
1806        iavf_reset_interrupt_capability(adapter);
1807        iavf_free_q_vectors(adapter);
1808        iavf_free_queues(adapter);
1809
1810        err = iavf_init_interrupt_scheme(adapter);
1811        if (err)
1812                goto err;
1813
1814        netif_tx_stop_all_queues(netdev);
1815
1816        err = iavf_request_misc_irq(adapter);
1817        if (err)
1818                goto err;
1819
1820        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1821
1822        iavf_map_rings_to_vectors(adapter);
1823err:
1824        return err;
1825}
1826
1827/**
1828 * iavf_process_aq_command - process aq_required flags
1829 * and send the corresponding aq command
1830 * @adapter: pointer to iavf adapter structure
1831 *
1832 * Returns 0 on success
1833 * Returns error code if no command was sent
1834 * or error code if the command failed.
1835 **/
1836static int iavf_process_aq_command(struct iavf_adapter *adapter)
1837{
1838        if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1839                return iavf_send_vf_config_msg(adapter);
1840        if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1841                return iavf_send_vf_offload_vlan_v2_msg(adapter);
1842        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1843                iavf_disable_queues(adapter);
1844                return 0;
1845        }
1846
1847        if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1848                iavf_map_queues(adapter);
1849                return 0;
1850        }
1851
1852        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1853                iavf_add_ether_addrs(adapter);
1854                return 0;
1855        }
1856
1857        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1858                iavf_add_vlans(adapter);
1859                return 0;
1860        }
1861
1862        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1863                iavf_del_ether_addrs(adapter);
1864                return 0;
1865        }
1866
1867        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1868                iavf_del_vlans(adapter);
1869                return 0;
1870        }
1871
1872        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1873                iavf_enable_vlan_stripping(adapter);
1874                return 0;
1875        }
1876
1877        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1878                iavf_disable_vlan_stripping(adapter);
1879                return 0;
1880        }
1881
1882        if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1883                iavf_configure_queues(adapter);
1884                return 0;
1885        }
1886
1887        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1888                iavf_enable_queues(adapter);
1889                return 0;
1890        }
1891
1892        if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1893                /* This message goes straight to the firmware, not the
1894                 * PF, so we don't have to set current_op as we will
1895                 * not get a response through the ARQ.
1896                 */
1897                adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1898                return 0;
1899        }
1900        if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1901                iavf_get_hena(adapter);
1902                return 0;
1903        }
1904        if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1905                iavf_set_hena(adapter);
1906                return 0;
1907        }
1908        if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1909                iavf_set_rss_key(adapter);
1910                return 0;
1911        }
1912        if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1913                iavf_set_rss_lut(adapter);
1914                return 0;
1915        }
1916
1917        if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1918                iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1919                                       FLAG_VF_MULTICAST_PROMISC);
1920                return 0;
1921        }
1922
1923        if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1924                iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1925                return 0;
1926        }
1927        if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
1928            (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1929                iavf_set_promiscuous(adapter, 0);
1930                return 0;
1931        }
1932
1933        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1934                iavf_enable_channels(adapter);
1935                return 0;
1936        }
1937
1938        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1939                iavf_disable_channels(adapter);
1940                return 0;
1941        }
1942        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1943                iavf_add_cloud_filter(adapter);
1944                return 0;
1945        }
1946
1947        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1948                iavf_del_cloud_filter(adapter);
1949                return 0;
1950        }
1959        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
1960                iavf_add_fdir_filter(adapter);
1961                return 0;
1962        }
1963        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
1964                iavf_del_fdir_filter(adapter);
1965                return 0;
1966        }
1967        if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
1968                iavf_add_adv_rss_cfg(adapter);
1969                return 0;
1970        }
1971        if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
1972                iavf_del_adv_rss_cfg(adapter);
1973                return 0;
1974        }
1975        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
1976                iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
1977                return 0;
1978        }
1979        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
1980                iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
1981                return 0;
1982        }
1983        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
1984                iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
1985                return 0;
1986        }
1987        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
1988                iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
1989                return 0;
1990        }
1991        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
1992                iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
1993                return 0;
1994        }
1995        if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
1996                iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
1997                return 0;
1998        }
1999        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2000                iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2001                return 0;
2002        }
2003        if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2004                iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2005                return 0;
2006        }
2007
2008        if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2009                iavf_request_stats(adapter);
2010                return 0;
2011        }
2012
2013        return -EAGAIN;
2014}
2015
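/* Editor's note (hypothetical helper, not part of the driver): the checks
 * above are strictly ordered, and at most one aq_required flag is serviced
 * per invocation; -EAGAIN means nothing was pending. The watchdog task paces
 * this so the PF can answer one virtchnl command before the next is sent:
 */
static inline bool iavf_example_service_one_aq_flag(struct iavf_adapter *adapter)
{
	/* true if one pending flag was turned into a virtchnl command */
	return iavf_process_aq_command(adapter) == 0;
}
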
2016/**
2017 * iavf_set_vlan_offload_features - set VLAN offload configuration
2018 * @adapter: board private structure
2019 * @prev_features: previous features used for comparison
2020 * @features: updated features used for configuration
2021 *
2022 * Set the aq_required bit(s) based on the requested features passed in to
2023 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2024 * the watchdog if any changes are requested to expedite the request via
2025 * virtchnl.
2026 **/
2027void
2028iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2029                               netdev_features_t prev_features,
2030                               netdev_features_t features)
2031{
2032        bool enable_stripping = true, enable_insertion = true;
2033        u16 vlan_ethertype = 0;
2034        u64 aq_required = 0;
2035
2036        /* keep cases separate because one ethertype for offloads can be
2037         * enabled at the same time as another is disabled, so check for an
2038         * enabled ethertype first, then check for disabled. Default to
2039         * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2040         * stripping.
2041         */
2042        if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2043                vlan_ethertype = ETH_P_8021AD;
2044        else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2045                vlan_ethertype = ETH_P_8021Q;
2046        else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2047                vlan_ethertype = ETH_P_8021AD;
2048        else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2049                vlan_ethertype = ETH_P_8021Q;
2050        else
2051                vlan_ethertype = ETH_P_8021Q;
2052
2053        if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2054                enable_stripping = false;
2055        if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2056                enable_insertion = false;
2057
2058        if (VLAN_ALLOWED(adapter)) {
2059                /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2060                 * stripping via virtchnl. VLAN insertion can be toggled on the
2061                 * netdev, but it doesn't require a virtchnl message
2062                 */
2063                if (enable_stripping)
2064                        aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2065                else
2066                        aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2067
2068        } else if (VLAN_V2_ALLOWED(adapter)) {
2069                switch (vlan_ethertype) {
2070                case ETH_P_8021Q:
2071                        if (enable_stripping)
2072                                aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2073                        else
2074                                aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2075
2076                        if (enable_insertion)
2077                                aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2078                        else
2079                                aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2080                        break;
2081                case ETH_P_8021AD:
2082                        if (enable_stripping)
2083                                aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2084                        else
2085                                aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2086
2087                        if (enable_insertion)
2088                                aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2089                        else
2090                                aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2091                        break;
2092                }
2093        }
2094
2095        if (aq_required) {
2096                adapter->aq_required |= aq_required;
2097                mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
2098        }
2099}
2100
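/* Editor's usage sketch (hypothetical, not part of the driver): a features
 * change that turns off CTAG stripping while leaving insertion alone differs
 * from the previous features only in the RX flag, and would be requested like
 * so, e.g. from an ndo_set_features handler:
 */
static void iavf_example_disable_ctag_stripping(struct iavf_adapter *adapter)
{
	netdev_features_t prev = adapter->netdev->features;

	iavf_set_vlan_offload_features(adapter, prev,
				       prev & ~NETIF_F_HW_VLAN_CTAG_RX);
}
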
2101/**
2102 * iavf_startup - first step of driver startup
2103 * @adapter: board private structure
2104 *
2105 * Function processes the __IAVF_STARTUP driver state.
2106 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2107 * on failure the state is changed to __IAVF_INIT_FAILED.
2108 **/
2109static void iavf_startup(struct iavf_adapter *adapter)
2110{
2111        struct pci_dev *pdev = adapter->pdev;
2112        struct iavf_hw *hw = &adapter->hw;
2113        enum iavf_status status;
2114        int ret;
2115
2116        WARN_ON(adapter->state != __IAVF_STARTUP);
2117
2118        /* driver loaded, probe complete */
2119        adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2120        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2121        status = iavf_set_mac_type(hw);
2122        if (status) {
2123                dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2124                goto err;
2125        }
2126
2127        ret = iavf_check_reset_complete(hw);
2128        if (ret) {
2129                dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2130                         ret);
2131                goto err;
2132        }
2133        hw->aq.num_arq_entries = IAVF_AQ_LEN;
2134        hw->aq.num_asq_entries = IAVF_AQ_LEN;
2135        hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2136        hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2137
2138        status = iavf_init_adminq(hw);
2139        if (status) {
2140                dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2141                        status);
2142                goto err;
2143        }
2144        ret = iavf_send_api_ver(adapter);
2145        if (ret) {
2146                dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2147                iavf_shutdown_adminq(hw);
2148                goto err;
2149        }
2150        iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2151        return;
2152err:
2153        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2154}
2155
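/* Editor's note: initialization is a linear state machine stepped by the
 * watchdog task (see iavf_watchdog_task() below):
 *
 *   __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK -> __IAVF_INIT_GET_RESOURCES
 *     -> __IAVF_INIT_EXTENDED_CAPS -> __IAVF_INIT_CONFIG_ADAPTER -> __IAVF_DOWN
 *
 * A failing step drops to __IAVF_INIT_FAILED, from which the watchdog retries
 * the last state; after IAVF_AQ_MAX_ERR consecutive failures it declares PF
 * communications failed and starts over once the hardware reports active.
 */
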
2156/**
2157 * iavf_init_version_check - second step of driver startup
2158 * @adapter: board private structure
2159 *
2160 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2161 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2162 * on failure the state is changed to __IAVF_INIT_FAILED.
2163 **/
2164static void iavf_init_version_check(struct iavf_adapter *adapter)
2165{
2166        struct pci_dev *pdev = adapter->pdev;
2167        struct iavf_hw *hw = &adapter->hw;
2168        int err = -EAGAIN;
2169
2170        WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2171
2172        if (!iavf_asq_done(hw)) {
2173                dev_err(&pdev->dev, "Admin queue command never completed\n");
2174                iavf_shutdown_adminq(hw);
2175                iavf_change_state(adapter, __IAVF_STARTUP);
2176                goto err;
2177        }
2178
2179        /* aq msg sent, awaiting reply */
2180        err = iavf_verify_api_ver(adapter);
2181        if (err) {
2182                if (err == -EALREADY)
2183                        err = iavf_send_api_ver(adapter);
2184                else
2185                        dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2186                                adapter->pf_version.major,
2187                                adapter->pf_version.minor,
2188                                VIRTCHNL_VERSION_MAJOR,
2189                                VIRTCHNL_VERSION_MINOR);
2190                goto err;
2191        }
2192        err = iavf_send_vf_config_msg(adapter);
2193        if (err) {
2194                dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2195                        err);
2196                goto err;
2197        }
2198        iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2199        return;
2200err:
2201        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2202}
2203
2204/**
2205 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2206 * @adapter: board private structure
2207 */
2208int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2209{
2210        int i, num_req_queues = adapter->num_req_queues;
2211        struct iavf_vsi *vsi = &adapter->vsi;
2212
2213        for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2214                if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2215                        adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2216        }
2217        if (!adapter->vsi_res) {
2218                dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2219                return -ENODEV;
2220        }
2221
2222        if (num_req_queues &&
2223            num_req_queues > adapter->vsi_res->num_queue_pairs) {
2224                /* Problem.  The PF gave us fewer queues than we had
2225                 * requested.  We need a reset to see if we can get
2226                 * back to a working state.
2227                 */
2228                dev_err(&adapter->pdev->dev,
2229                        "Requested %d queues, but PF only gave us %d.\n",
2230                        num_req_queues,
2231                        adapter->vsi_res->num_queue_pairs);
2232                adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2233                adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2234                iavf_schedule_reset(adapter);
2235
2236                return -EAGAIN;
2237        }
2238        adapter->num_req_queues = 0;
2239        adapter->vsi.id = adapter->vsi_res->vsi_id;
2240
2241        adapter->vsi.back = adapter;
2242        adapter->vsi.base_vector = 1;
2243        vsi->netdev = adapter->netdev;
2244        vsi->qs_handle = adapter->vsi_res->qset_handle;
2245        if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2246                adapter->rss_key_size = adapter->vf_res->rss_key_size;
2247                adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2248        } else {
2249                adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2250                adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2251        }
2252
2253        return 0;
2254}
2255
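/* Editor's worked example (hypothetical numbers): if a user had requested 16
 * queues but the PF only granted 8 (vsi_res->num_queue_pairs == 8), the code
 * above clamps num_req_queues to 8, flags IAVF_FLAG_REINIT_MSIX_NEEDED,
 * schedules a reset, and returns -EAGAIN so initialization is retried with
 * the smaller count.
 */
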
2256/**
2257 * iavf_init_get_resources - third step of driver startup
2258 * @adapter: board private structure
2259 *
2260 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2261 * retrieves the VF resource configuration from the PF.
2262 * On success the state is changed to __IAVF_INIT_EXTENDED_CAPS;
2263 * on failure the state is changed to __IAVF_INIT_FAILED.
2264 **/
2265static void iavf_init_get_resources(struct iavf_adapter *adapter)
2266{
2267        struct pci_dev *pdev = adapter->pdev;
2268        struct iavf_hw *hw = &adapter->hw;
2269        int err;
2270
2271        WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2272        /* aq msg sent, awaiting reply */
2273        if (!adapter->vf_res) {
2274                adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2275                                          GFP_KERNEL);
2276                if (!adapter->vf_res) {
2277                        err = -ENOMEM;
2278                        goto err;
2279                }
2280        }
2281        err = iavf_get_vf_config(adapter);
2282        if (err == -EALREADY) {
2283                err = iavf_send_vf_config_msg(adapter);
2284                goto err_alloc;
2285        } else if (err == -EINVAL) {
2286                /* We only get -EINVAL if the device is in a very bad
2287                 * state or if we've been disabled for previous bad
2288                 * behavior. Either way, we're done now.
2289                 */
2290                iavf_shutdown_adminq(hw);
2291                dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2292                return;
2293        }
2294        if (err) {
2295                dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2296                goto err_alloc;
2297        }
2298
2299        err = iavf_parse_vf_resource_msg(adapter);
2300        if (err) {
2301                dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2302                        err);
2303                goto err_alloc;
2304        }
2305        /* Some features require additional messages to negotiate extended
2306         * capabilities. These are processed in sequence by the
2307         * __IAVF_INIT_EXTENDED_CAPS driver state.
2308         */
2309        adapter->extended_caps = IAVF_EXTENDED_CAPS;
2310
2311        iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2312        return;
2313
2314err_alloc:
2315        kfree(adapter->vf_res);
2316        adapter->vf_res = NULL;
2317err:
2318        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2319}
2320
2321/**
2322 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2323 * @adapter: board private structure
2324 *
2325 * Function processes send of the extended VLAN V2 capability message to the
2326 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2327 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2328 */
2329static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2330{
2331        int ret;
2332
2333        WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2334
2335        ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2336        if (ret == -EOPNOTSUPP) {
2337                /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this
2338                 * case, we did not send the capability exchange message and
2339                 * do not expect a response.
2340                 */
2341                adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2342        }
2343
2344        /* We sent the message, so move on to the next step */
2345        adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2346}
2347
2348/**
2349 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2350 * @adapter: board private structure
2351 *
2352 * Function processes receipt of the extended VLAN V2 capability message from
2353 * the PF.
2354 **/
2355static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2356{
2357        int ret;
2358
2359        WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2360
2361        memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2362
2363        ret = iavf_get_vf_vlan_v2_caps(adapter);
2364        if (ret)
2365                goto err;
2366
2367        /* We've processed receipt of the VLAN V2 caps message */
2368        adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2369        return;
2370err:
2371        /* We didn't receive a reply. Make sure we try sending again when
2372         * __IAVF_INIT_FAILED attempts to recover.
2373         */
2374        adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2375        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2376}
2377
2378/**
2379 * iavf_init_process_extended_caps - Part of driver startup
2380 * @adapter: board private structure
2381 *
2382 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2383 * handles negotiating capabilities for features which require an additional
2384 * message.
2385 *
2386 * Once all extended capabilities exchanges are finished, the driver will
2387 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2388 */
2389static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2390{
2391        WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2392
2393        /* Process capability exchange for VLAN V2 */
2394        if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2395                iavf_init_send_offload_vlan_v2_caps(adapter);
2396                return;
2397        } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2398                iavf_init_recv_offload_vlan_v2_caps(adapter);
2399                return;
2400        }
2401
2402        /* When we reach here, no further extended capabilities exchanges are
2403         * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2404         */
2405        iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2406}
2407
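/* Editor's pattern sketch (hypothetical capability, not part of the driver):
 * each extended capability is tracked by a SEND/RECV bit pair in
 * adapter->extended_caps; SEND is cleared once the request is sent and RECV
 * once the reply is parsed. A new capability "FOO" would slot into the
 * function above with the same two-step shape:
 *
 *	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_FOO) {
 *		iavf_init_send_foo_caps(adapter);	// clears SEND bit
 *		return;
 *	} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_FOO) {
 *		iavf_init_recv_foo_caps(adapter);	// clears RECV bit
 *		return;
 *	}
 */
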
2408/**
2409 * iavf_init_config_adapter - last part of driver startup
2410 * @adapter: board private structure
2411 *
2412 * After all the supported capabilities are negotiated, then the
2413 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2414 */
2415static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2416{
2417        struct net_device *netdev = adapter->netdev;
2418        struct pci_dev *pdev = adapter->pdev;
2419        int err;
2420
2421        WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2422
2423        if (iavf_process_config(adapter))
2424                goto err;
2425
2426        adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2427
2428        adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2429
2430        netdev->netdev_ops = &iavf_netdev_ops;
2431        iavf_set_ethtool_ops(netdev);
2432        netdev->watchdog_timeo = 5 * HZ;
2433
2434        /* MTU range: 68 - 9710 */
2435        netdev->min_mtu = ETH_MIN_MTU;
2436        netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2437
2438        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2439                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2440                         adapter->hw.mac.addr);
2441                eth_hw_addr_random(netdev);
2442                ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2443        } else {
2444                eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2445                ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2446        }
2447
2448        adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2449        adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2450        err = iavf_init_interrupt_scheme(adapter);
2451        if (err)
2452                goto err_sw_init;
2453        iavf_map_rings_to_vectors(adapter);
2454        if (adapter->vf_res->vf_cap_flags &
2455                VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2456                adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2457
2458        err = iavf_request_misc_irq(adapter);
2459        if (err)
2460                goto err_sw_init;
2461
2462        netif_carrier_off(netdev);
2463        adapter->link_up = false;
2464
2465        /* hold the RTNL lock to prevent any callbacks after device
2466         * registration until the driver state is set to __IAVF_DOWN
2467         */
2468        rtnl_lock();
2469        if (!adapter->netdev_registered) {
2470                err = register_netdevice(netdev);
2471                if (err) {
2472                        rtnl_unlock();
2473                        goto err_register;
2474                }
2475        }
2476
2477        adapter->netdev_registered = true;
2478
2479        netif_tx_stop_all_queues(netdev);
2480        if (CLIENT_ALLOWED(adapter)) {
2481                err = iavf_lan_add_device(adapter);
2482                if (err)
2483                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2484                                 err);
2485        }
2486        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2487        if (netdev->features & NETIF_F_GRO)
2488                dev_info(&pdev->dev, "GRO is enabled\n");
2489
2490        iavf_change_state(adapter, __IAVF_DOWN);
2491        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2492        rtnl_unlock();
2493
2494        iavf_misc_irq_enable(adapter);
2495        wake_up(&adapter->down_waitqueue);
2496
2497        adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2498        adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2499        if (!adapter->rss_key || !adapter->rss_lut) {
2500                err = -ENOMEM;
2501                goto err_mem;
2502        }
2503        if (RSS_AQ(adapter))
2504                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2505        else
2506                iavf_init_rss(adapter);
2507
2508        if (VLAN_V2_ALLOWED(adapter))
2509                /* request initial VLAN offload settings */
2510                iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2511
2512        return;
2513err_mem:
2514        iavf_free_rss(adapter);
2515err_register:
2516        iavf_free_misc_irq(adapter);
2517err_sw_init:
2518        iavf_reset_interrupt_capability(adapter);
2519err:
2520        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2521}
2522
2523/**
2524 * iavf_watchdog_task - Periodic call-back task
2525 * @work: pointer to work_struct
2526 **/
2527static void iavf_watchdog_task(struct work_struct *work)
2528{
2529        struct iavf_adapter *adapter = container_of(work,
2530                                                    struct iavf_adapter,
2531                                                    watchdog_task.work);
2532        struct iavf_hw *hw = &adapter->hw;
2533        u32 reg_val;
2534
2535        if (!mutex_trylock(&adapter->crit_lock)) {
2536                if (adapter->state == __IAVF_REMOVE)
2537                        return;
2538
2539                goto restart_watchdog;
2540        }
2541
2542        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2543                iavf_change_state(adapter, __IAVF_COMM_FAILED);
2544
2545        if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2546                adapter->aq_required = 0;
2547                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2548                mutex_unlock(&adapter->crit_lock);
2549                queue_work(iavf_wq, &adapter->reset_task);
2550                return;
2551        }
2552
2553        switch (adapter->state) {
2554        case __IAVF_STARTUP:
2555                iavf_startup(adapter);
2556                mutex_unlock(&adapter->crit_lock);
2557                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2558                                   msecs_to_jiffies(30));
2559                return;
2560        case __IAVF_INIT_VERSION_CHECK:
2561                iavf_init_version_check(adapter);
2562                mutex_unlock(&adapter->crit_lock);
2563                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2564                                   msecs_to_jiffies(30));
2565                return;
2566        case __IAVF_INIT_GET_RESOURCES:
2567                iavf_init_get_resources(adapter);
2568                mutex_unlock(&adapter->crit_lock);
2569                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2570                                   msecs_to_jiffies(1));
2571                return;
2572        case __IAVF_INIT_EXTENDED_CAPS:
2573                iavf_init_process_extended_caps(adapter);
2574                mutex_unlock(&adapter->crit_lock);
2575                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2576                                   msecs_to_jiffies(1));
2577                return;
2578        case __IAVF_INIT_CONFIG_ADAPTER:
2579                iavf_init_config_adapter(adapter);
2580                mutex_unlock(&adapter->crit_lock);
2581                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2582                                   msecs_to_jiffies(1));
2583                return;
2584        case __IAVF_INIT_FAILED:
2585                if (test_bit(__IAVF_IN_REMOVE_TASK,
2586                             &adapter->crit_section)) {
2587                        /* Do not update the state and do not reschedule the
2588                         * watchdog task; iavf_remove should handle this state,
2589                         * as it can otherwise loop forever
2590                         */
2591                        mutex_unlock(&adapter->crit_lock);
2592                        return;
2593                }
2594                if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2595                        dev_err(&adapter->pdev->dev,
2596                                "Failed to communicate with PF; waiting before retry\n");
2597                        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2598                        iavf_shutdown_adminq(hw);
2599                        mutex_unlock(&adapter->crit_lock);
2600                        queue_delayed_work(iavf_wq,
2601                                           &adapter->watchdog_task, (5 * HZ));
2602                        return;
2603                }
2604                /* Try again from the failed step */
2605                iavf_change_state(adapter, adapter->last_state);
2606                mutex_unlock(&adapter->crit_lock);
2607                queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2608                return;
2609        case __IAVF_COMM_FAILED:
2610                if (test_bit(__IAVF_IN_REMOVE_TASK,
2611                             &adapter->crit_section)) {
2612                        /* Set state to __IAVF_INIT_FAILED and perform remove
2613                         * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2614                         * doesn't bring the state back to __IAVF_COMM_FAILED.
2615                         */
2616                        iavf_change_state(adapter, __IAVF_INIT_FAILED);
2617                        adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2618                        mutex_unlock(&adapter->crit_lock);
2619                        return;
2620                }
2621                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2622                          IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2623                if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2624                    reg_val == VIRTCHNL_VFR_COMPLETED) {
2625                        /* A chance for redemption! */
2626                        dev_err(&adapter->pdev->dev,
2627                                "Hardware came out of reset. Attempting reinit.\n");
2628                        /* When init task contacts the PF and
2629                         * gets everything set up again, it'll restart the
2630                         * watchdog for us. Down, boy. Sit. Stay. Woof.
2631                         */
2632                        iavf_change_state(adapter, __IAVF_STARTUP);
2633                        adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2634                }
2635                adapter->aq_required = 0;
2636                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2637                mutex_unlock(&adapter->crit_lock);
2638                queue_delayed_work(iavf_wq,
2639                                   &adapter->watchdog_task,
2640                                   msecs_to_jiffies(10));
2641                return;
2642        case __IAVF_RESETTING:
2643                mutex_unlock(&adapter->crit_lock);
2644                queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2645                return;
2646        case __IAVF_DOWN:
2647        case __IAVF_DOWN_PENDING:
2648        case __IAVF_TESTING:
2649        case __IAVF_RUNNING:
2650                if (adapter->current_op) {
2651                        if (!iavf_asq_done(hw)) {
2652                                dev_dbg(&adapter->pdev->dev,
2653                                        "Admin queue timeout\n");
2654                                iavf_send_api_ver(adapter);
2655                        }
2656                } else {
2657                        int ret = iavf_process_aq_command(adapter);
2658
2659                        /* An error will be returned if no commands were
2660                         * processed; use this opportunity to update stats
2661                         * if the error isn't -EOPNOTSUPP
2662                         */
2663                        if (ret && ret != -EOPNOTSUPP &&
2664                            adapter->state == __IAVF_RUNNING)
2665                                iavf_request_stats(adapter);
2666                }
2667                if (adapter->state == __IAVF_RUNNING)
2668                        iavf_detect_recover_hung(&adapter->vsi);
2669                break;
2670        case __IAVF_REMOVE:
2671        default:
2672                mutex_unlock(&adapter->crit_lock);
2673                return;
2674        }
2675
2676        /* check for hw reset */
2677        reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2678        if (!reg_val) {
2679                adapter->flags |= IAVF_FLAG_RESET_PENDING;
2680                adapter->aq_required = 0;
2681                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2682                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2683                queue_work(iavf_wq, &adapter->reset_task);
2684                mutex_unlock(&adapter->crit_lock);
2685                queue_delayed_work(iavf_wq,
2686                                   &adapter->watchdog_task, HZ * 2);
2687                return;
2688        }
2689
2690        schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2691        mutex_unlock(&adapter->crit_lock);
2692restart_watchdog:
2693        if (adapter->state >= __IAVF_DOWN)
2694                queue_work(iavf_wq, &adapter->adminq_task);
2695        if (adapter->aq_required)
2696                queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2697                                   msecs_to_jiffies(20));
2698        else
2699                queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2700}
2701
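/* Editor's note on pacing: the watchdog rearms itself at state-dependent
 * rates: roughly 30 ms between the early init states, 1 ms between the later
 * ones, 10 ms while in __IAVF_COMM_FAILED, 2 s in steady state or while a
 * reset is in progress, and 20 ms whenever aq_required flags are pending.
 */
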
2702/**
2703 * iavf_disable_vf - disable VF
2704 * @adapter: board private structure
2705 *
2706 * Set communication failed flag and free all resources.
2707 * NOTE: This function is expected to be called with crit_lock being held.
2708 **/
2709static void iavf_disable_vf(struct iavf_adapter *adapter)
2710{
2711        struct iavf_mac_filter *f, *ftmp;
2712        struct iavf_vlan_filter *fv, *fvtmp;
2713        struct iavf_cloud_filter *cf, *cftmp;
2714
2715        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2716
2717        /* We don't use netif_running() because it may be true prior to
2718         * ndo_open() returning, so we can't assume it means all our open
2719         * tasks have finished, since we're not holding the rtnl_lock here.
2720         */
2721        if (adapter->state == __IAVF_RUNNING) {
2722                set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2723                netif_carrier_off(adapter->netdev);
2724                netif_tx_disable(adapter->netdev);
2725                adapter->link_up = false;
2726                iavf_napi_disable_all(adapter);
2727                iavf_irq_disable(adapter);
2728                iavf_free_traffic_irqs(adapter);
2729                iavf_free_all_tx_resources(adapter);
2730                iavf_free_all_rx_resources(adapter);
2731        }
2732
2733        spin_lock_bh(&adapter->mac_vlan_list_lock);
2734
2735        /* Delete all of the filters */
2736        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2737                list_del(&f->list);
2738                kfree(f);
2739        }
2740
2741        list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2742                list_del(&fv->list);
2743                kfree(fv);
2744        }
2745
2746        spin_unlock_bh(&adapter->mac_vlan_list_lock);
2747
2748        spin_lock_bh(&adapter->cloud_filter_list_lock);
2749        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2750                list_del(&cf->list);
2751                kfree(cf);
2752                adapter->num_cloud_filters--;
2753        }
2754        spin_unlock_bh(&adapter->cloud_filter_list_lock);
2755
2756        iavf_free_misc_irq(adapter);
2757        iavf_reset_interrupt_capability(adapter);
2758        iavf_free_q_vectors(adapter);
2759        iavf_free_queues(adapter);
2760        memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2761        iavf_shutdown_adminq(&adapter->hw);
2762        adapter->netdev->flags &= ~IFF_UP;
2763        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2764        iavf_change_state(adapter, __IAVF_DOWN);
2765        wake_up(&adapter->down_waitqueue);
2766        dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2767}
2768
2769/**
2770 * iavf_reset_task - Call-back task to handle hardware reset
2771 * @work: pointer to work_struct
2772 *
2773 * During reset we need to shut down and reinitialize the admin queue
2774 * before we can use it to communicate with the PF again. We also clear
2775 * and reinit the rings because that context is lost as well.
2776 **/
2777static void iavf_reset_task(struct work_struct *work)
2778{
2779        struct iavf_adapter *adapter = container_of(work,
2780                                                      struct iavf_adapter,
2781                                                      reset_task);
2782        struct virtchnl_vf_resource *vfres = adapter->vf_res;
2783        struct net_device *netdev = adapter->netdev;
2784        struct iavf_hw *hw = &adapter->hw;
2785        struct iavf_mac_filter *f, *ftmp;
2786        struct iavf_cloud_filter *cf;
2787        enum iavf_status status;
2788        u32 reg_val;
2789        int i = 0, err;
2790        bool running;
2791
2792        /* When the device is being removed it doesn't make sense to run
2793         * the reset task, so just return in such a case.
2794         */
2795        if (!mutex_trylock(&adapter->crit_lock)) {
2796                if (adapter->state != __IAVF_REMOVE)
2797                        queue_work(iavf_wq, &adapter->reset_task);
2798
2799                return;
2800        }
2801
2802        while (!mutex_trylock(&adapter->client_lock))
2803                usleep_range(500, 1000);
2804        if (CLIENT_ENABLED(adapter)) {
2805                adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2806                                    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2807                                    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2808                                    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2809                cancel_delayed_work_sync(&adapter->client_task);
2810                iavf_notify_client_close(&adapter->vsi, true);
2811        }
2812        iavf_misc_irq_disable(adapter);
2813        if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2814                adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2815                /* Restart the AQ here. If we have been reset but didn't
2816                 * detect it, or if the PF had to reinit, our AQ will be hosed.
2817                 */
2818                iavf_shutdown_adminq(hw);
2819                iavf_init_adminq(hw);
2820                iavf_request_reset(adapter);
2821        }
2822        adapter->flags |= IAVF_FLAG_RESET_PENDING;
2823
2824        /* poll until we see the reset actually happen */
2825        for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2826                reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2827                          IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2828                if (!reg_val)
2829                        break;
2830                usleep_range(5000, 10000);
2831        }
2832        if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2833                dev_info(&adapter->pdev->dev, "Never saw reset\n");
2834                goto continue_reset; /* act like the reset happened */
2835        }
2836
2837        /* wait until the reset is complete and the PF is responding to us */
2838        for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2839                /* sleep first to make sure a minimum wait time is met */
2840                msleep(IAVF_RESET_WAIT_MS);
2841
2842                reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2843                          IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2844                if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2845                        break;
2846        }
2847
2848        pci_set_master(adapter->pdev);
2849        pci_restore_msi_state(adapter->pdev);
2850
2851        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2852                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2853                        reg_val);
2854                iavf_disable_vf(adapter);
2855                mutex_unlock(&adapter->client_lock);
2856                mutex_unlock(&adapter->crit_lock);
2857                return; /* Do not attempt to reinit. It's dead, Jim. */
2858        }
2859
2860continue_reset:
2861        /* We don't use netif_running() because it may be true prior to
2862         * ndo_open() returning, so we can't assume it means all our open
2863         * tasks have finished, since we're not holding the rtnl_lock here.
2864         */
2865        running = adapter->state == __IAVF_RUNNING;
2866
2867        if (running) {
2868                netif_carrier_off(netdev);
2869                netif_tx_stop_all_queues(netdev);
2870                adapter->link_up = false;
2871                iavf_napi_disable_all(adapter);
2872        }
2873        iavf_irq_disable(adapter);
2874
2875        iavf_change_state(adapter, __IAVF_RESETTING);
2876        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2877
2878        /* free the Tx/Rx rings and descriptors, might be better to just
2879         * re-use them sometime in the future
2880         */
2881        iavf_free_all_rx_resources(adapter);
2882        iavf_free_all_tx_resources(adapter);
2883
2884        adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2885        /* kill and reinit the admin queue */
2886        iavf_shutdown_adminq(hw);
2887        adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2888        status = iavf_init_adminq(hw);
2889        if (status) {
2890                dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2891                         status);
2892                goto reset_err;
2893        }
2894        adapter->aq_required = 0;
2895
2896        if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
2897            (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
2898                err = iavf_reinit_interrupt_scheme(adapter);
2899                if (err)
2900                        goto reset_err;
2901        }
2902
2903        if (RSS_AQ(adapter)) {
2904                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2905        } else {
2906                err = iavf_init_rss(adapter);
2907                if (err)
2908                        goto reset_err;
2909        }
2910
2911        adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2912        /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
2913         * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
2914         * however, the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
2915         * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
2916         * been successfully sent and negotiated
2917         */
2918        adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
2919        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2920
2921        spin_lock_bh(&adapter->mac_vlan_list_lock);
2922
2923        /* Delete filter for the current MAC address; it could have
2924         * been changed by the PF via administratively set MAC.
2925         * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2926         */
2927        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2928                if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2929                        list_del(&f->list);
2930                        kfree(f);
2931                }
2932        }
2933        /* re-add all MAC filters */
2934        list_for_each_entry(f, &adapter->mac_filter_list, list) {
2935                f->add = true;
2936        }
2937        spin_unlock_bh(&adapter->mac_vlan_list_lock);
2938
2939        /* check if TCs are running and re-add all cloud filters */
2940        spin_lock_bh(&adapter->cloud_filter_list_lock);
2941        if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2942            adapter->num_tc) {
2943                list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2944                        cf->add = true;
2945                }
2946        }
2947        spin_unlock_bh(&adapter->cloud_filter_list_lock);
2948
2949        adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2950        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2951        iavf_misc_irq_enable(adapter);
2952
2953        bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
2954        bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
2955
2956        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2957
2958        /* We were running when the reset started, so we need to restore some
2959         * state here.
2960         */
2961        if (running) {
2962                /* allocate transmit descriptors */
2963                err = iavf_setup_all_tx_resources(adapter);
2964                if (err)
2965                        goto reset_err;
2966
2967                /* allocate receive descriptors */
2968                err = iavf_setup_all_rx_resources(adapter);
2969                if (err)
2970                        goto reset_err;
2971
2972                if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
2973                    (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
2974                        err = iavf_request_traffic_irqs(adapter, netdev->name);
2975                        if (err)
2976                                goto reset_err;
2977
2978                        adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
2979                }
2980
2981                iavf_configure(adapter);
2982
2983                /* iavf_up_complete() will switch device back
2984                 * to __IAVF_RUNNING
2985                 */
2986                iavf_up_complete(adapter);
2987
2988                iavf_irq_enable(adapter, true);
2989        } else {
2990                iavf_change_state(adapter, __IAVF_DOWN);
2991                wake_up(&adapter->down_waitqueue);
2992        }
2993
2994        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2995
2996        mutex_unlock(&adapter->client_lock);
2997        mutex_unlock(&adapter->crit_lock);
2998
2999        return;
3000reset_err:
3001        mutex_unlock(&adapter->client_lock);
3002        mutex_unlock(&adapter->crit_lock);
3003        if (running)
3004                iavf_change_state(adapter, __IAVF_RUNNING);
3005        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3006        iavf_close(netdev);
3007}
3008
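/* Usage sketch (illustrative commentary, not driver code): nothing calls the
 * reset task directly.  Paths that need a reset flag the adapter and let the
 * workqueue run the task, as iavf_change_mtu() does further below:
 *
 *	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
 *	queue_work(iavf_wq, &adapter->reset_task);
 *
 * The task then detects or requests the reset, polls IAVF_VFGEN_RSTAT for
 * VIRTCHNL_VFR_VFACTIVE, and rebuilds rings, filters and interrupts as above.
 */
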
3009/**
3010 * iavf_adminq_task - worker thread to clean the admin queue
3011 * @work: pointer to work_struct containing our data
3012 **/
3013static void iavf_adminq_task(struct work_struct *work)
3014{
3015        struct iavf_adapter *adapter =
3016                container_of(work, struct iavf_adapter, adminq_task);
3017        struct iavf_hw *hw = &adapter->hw;
3018        struct iavf_arq_event_info event;
3019        enum virtchnl_ops v_op;
3020        enum iavf_status ret, v_ret;
3021        u32 val, oldval;
3022        u16 pending;
3023
3024        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3025                goto out;
3026
3027        if (!mutex_trylock(&adapter->crit_lock)) {
3028                if (adapter->state == __IAVF_REMOVE)
3029                        return;
3030
3031                queue_work(iavf_wq, &adapter->adminq_task);
3032                goto out;
3033        }
3034
3035        event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3036        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3037        if (!event.msg_buf)
3038                goto out;
3039
3040        do {
3041                ret = iavf_clean_arq_element(hw, &event, &pending);
3042                v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3043                v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3044
3045                if (ret || !v_op)
3046                        break; /* No event to process or error cleaning ARQ */
3047
3048                iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3049                                         event.msg_len);
3050                if (pending != 0)
3051                        memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3052        } while (pending);
3053        mutex_unlock(&adapter->crit_lock);
3054
3055        if (adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) {
3056                if (adapter->netdev_registered &&
3057                    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3058                        struct net_device *netdev = adapter->netdev;
3059
3060                        rtnl_lock();
3061                        netdev_update_features(netdev);
3062                        rtnl_unlock();
3063                        /* Request VLAN offload settings */
3064                        if (VLAN_V2_ALLOWED(adapter))
3065                                iavf_set_vlan_offload_features
3066                                        (adapter, 0, netdev->features);
3067
3068                        iavf_set_queue_vlan_tag_loc(adapter);
3069                }
3070
3071                adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3072        }
3073        if ((adapter->flags &
3074             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3075            adapter->state == __IAVF_RESETTING)
3076                goto freedom;
3077
3078        /* check for error indications */
3079        val = rd32(hw, hw->aq.arq.len);
3080        if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3081                goto freedom;
3082        oldval = val;
3083        if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3084                dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3085                val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3086        }
3087        if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3088                dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3089                val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3090        }
3091        if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3092                dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3093                val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3094        }
3095        if (oldval != val)
3096                wr32(hw, hw->aq.arq.len, val);
3097
3098        val = rd32(hw, hw->aq.asq.len);
3099        oldval = val;
3100        if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3101                dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3102                val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3103        }
3104        if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3105                dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3106                val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3107        }
3108        if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3109                dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3110                val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3111        }
3112        if (oldval != val)
3113                wr32(hw, hw->aq.asq.len, val);
3114
3115freedom:
3116        kfree(event.msg_buf);
3117out:
3118        /* re-enable Admin queue interrupt cause */
3119        iavf_misc_irq_enable(adapter);
3120}
3121
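/* Protocol note (summary of the loop above): each ARQ descriptor carries the
 * originating virtchnl opcode in cookie_high and the PF's status in
 * cookie_low, so a minimal consumer looks like:
 *
 *	v_op  = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
 *	v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
 *	iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
 *				 event.msg_len);
 *
 * A zero v_op means the ring was empty and there is nothing to dispatch.
 */
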
3122/**
3123 * iavf_client_task - worker thread to perform client work
3124 * @work: pointer to work_struct containing our data
3125 *
3126 * This task handles client interactions. Because client calls can be
3127 * reentrant, we can't handle them in the watchdog.
3128 **/
3129static void iavf_client_task(struct work_struct *work)
3130{
3131        struct iavf_adapter *adapter =
3132                container_of(work, struct iavf_adapter, client_task.work);
3133
3134        /* If we can't get the client bit, just give up. We'll be rescheduled
3135         * later.
3136         */
3137
3138        if (!mutex_trylock(&adapter->client_lock))
3139                return;
3140
3141        if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3142                iavf_client_subtask(adapter);
3143                adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3144                goto out;
3145        }
3146        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3147                iavf_notify_client_l2_params(&adapter->vsi);
3148                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3149                goto out;
3150        }
3151        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3152                iavf_notify_client_close(&adapter->vsi, false);
3153                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3154                goto out;
3155        }
3156        if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3157                iavf_notify_client_open(&adapter->vsi);
3158                adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3159        }
3160out:
3161        mutex_unlock(&adapter->client_lock);
3162}
3163
3164/**
3165 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3166 * @adapter: board private structure
3167 *
3168 * Free all transmit software resources
3169 **/
3170void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3171{
3172        int i;
3173
3174        if (!adapter->tx_rings)
3175                return;
3176
3177        for (i = 0; i < adapter->num_active_queues; i++)
3178                if (adapter->tx_rings[i].desc)
3179                        iavf_free_tx_resources(&adapter->tx_rings[i]);
3180}
3181
3182/**
3183 * iavf_setup_all_tx_resources - allocate all queues Tx resources
3184 * @adapter: board private structure
3185 *
3186 * If this function returns with an error, then it's possible one or
3187 * more of the rings is populated (while the rest are not).  It is the
3188 * caller's duty to clean those orphaned rings.
3189 *
3190 * Return 0 on success, negative on failure
3191 **/
3192static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3193{
3194        int i, err = 0;
3195
3196        for (i = 0; i < adapter->num_active_queues; i++) {
3197                adapter->tx_rings[i].count = adapter->tx_desc_count;
3198                err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3199                if (!err)
3200                        continue;
3201                dev_err(&adapter->pdev->dev,
3202                        "Allocation for Tx Queue %u failed\n", i);
3203                break;
3204        }
3205
3206        return err;
3207}
3208
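/* Usage note (illustrative): a caller that hits an error here is expected to
 * free the whole ring array rather than track which rings succeeded, e.g. as
 * the open path does:
 *
 *	err = iavf_setup_all_tx_resources(adapter);
 *	if (err)
 *		iavf_free_all_tx_resources(adapter);
 *
 * This is safe because iavf_free_all_tx_resources() skips rings whose
 * descriptors were never allocated.
 */
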
3209/**
3210 * iavf_setup_all_rx_resources - allocate all queues Rx resources
3211 * @adapter: board private structure
3212 *
3213 * If this function returns with an error, then it's possible one or
3214 * more of the rings is populated (while the rest are not).  It is the
3215 * caller's duty to clean those orphaned rings.
3216 *
3217 * Return 0 on success, negative on failure
3218 **/
3219static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3220{
3221        int i, err = 0;
3222
3223        for (i = 0; i < adapter->num_active_queues; i++) {
3224                adapter->rx_rings[i].count = adapter->rx_desc_count;
3225                err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3226                if (!err)
3227                        continue;
3228                dev_err(&adapter->pdev->dev,
3229                        "Allocation for Rx Queue %u failed\n", i);
3230                break;
3231        }
3232        return err;
3233}
3234
3235/**
3236 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3237 * @adapter: board private structure
3238 *
3239 * Free all receive software resources
3240 **/
3241void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3242{
3243        int i;
3244
3245        if (!adapter->rx_rings)
3246                return;
3247
3248        for (i = 0; i < adapter->num_active_queues; i++)
3249                if (adapter->rx_rings[i].desc)
3250                        iavf_free_rx_resources(&adapter->rx_rings[i]);
3251}
3252
3253/**
3254 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3255 * @adapter: board private structure
3256 * @max_tx_rate: max Tx bw for a tc
3257 **/
3258static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3259                                      u64 max_tx_rate)
3260{
3261        int speed = 0, ret = 0;
3262
3263        if (ADV_LINK_SUPPORT(adapter)) {
3264                if (adapter->link_speed_mbps < U32_MAX) {
3265                        speed = adapter->link_speed_mbps;
3266                        goto validate_bw;
3267                } else {
3268                        dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3269                        return -EINVAL;
3270                }
3271        }
3272
3273        switch (adapter->link_speed) {
3274        case VIRTCHNL_LINK_SPEED_40GB:
3275                speed = SPEED_40000;
3276                break;
3277        case VIRTCHNL_LINK_SPEED_25GB:
3278                speed = SPEED_25000;
3279                break;
3280        case VIRTCHNL_LINK_SPEED_20GB:
3281                speed = SPEED_20000;
3282                break;
3283        case VIRTCHNL_LINK_SPEED_10GB:
3284                speed = SPEED_10000;
3285                break;
3286        case VIRTCHNL_LINK_SPEED_5GB:
3287                speed = SPEED_5000;
3288                break;
3289        case VIRTCHNL_LINK_SPEED_2_5GB:
3290                speed = SPEED_2500;
3291                break;
3292        case VIRTCHNL_LINK_SPEED_1GB:
3293                speed = SPEED_1000;
3294                break;
3295        case VIRTCHNL_LINK_SPEED_100MB:
3296                speed = SPEED_100;
3297                break;
3298        default:
3299                break;
3300        }
3301
3302validate_bw:
3303        if (max_tx_rate > speed) {
3304                dev_err(&adapter->pdev->dev,
3305                        "Invalid tx rate specified\n");
3306                ret = -EINVAL;
3307        }
3308
3309        return ret;
3310}
3311
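/* Worked example (illustrative): with ADV_LINK_SUPPORT and
 * adapter->link_speed_mbps == 10000, a requested aggregate max_tx_rate of
 * 12000 Mbps fails the max_tx_rate > speed test and returns -EINVAL, while
 * 8000 Mbps is accepted.  Without ADV_LINK_SUPPORT, an unrecognized
 * adapter->link_speed leaves speed at 0, so any non-zero rate is rejected.
 */
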
3312/**
3313 * iavf_validate_ch_config - validate queue mapping info
3314 * @adapter: board private structure
3315 * @mqprio_qopt: queue parameters
3316 *
3317 * This function validates the queue channel configuration provided
3318 * by the user to ensure it can be supported. Returns 0 on a valid
3319 * config.
3320 **/
3321static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3322                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
3323{
3324        u64 total_max_rate = 0;
3325        int i, num_qps = 0;
3326        u64 tx_rate = 0;
3327        int ret = 0;
3328
3329        if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3330            mqprio_qopt->qopt.num_tc < 1)
3331                return -EINVAL;
3332
3333        for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
3334                if (!mqprio_qopt->qopt.count[i] ||
3335                    mqprio_qopt->qopt.offset[i] != num_qps)
3336                        return -EINVAL;
3337                if (mqprio_qopt->min_rate[i]) {
3338                        dev_err(&adapter->pdev->dev,
3339                                "Invalid min tx rate (greater than 0) specified\n");
3340                        return -EINVAL;
3341                }
3342                /* convert to Mbps */
3343                tx_rate = div_u64(mqprio_qopt->max_rate[i],
3344                                  IAVF_MBPS_DIVISOR);
3345                total_max_rate += tx_rate;
3346                num_qps += mqprio_qopt->qopt.count[i];
3347        }
3348        if (num_qps > adapter->num_active_queues) {
3349                dev_err(&adapter->pdev->dev,
3350                        "Cannot support requested number of queues\n");
3351                return -EINVAL;
3352        }
3353
3354        ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3355        return ret;
3356}
3357
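/* Example (hypothetical tc invocation on an 8-queue VF named eth0) that
 * passes this validation: two TCs with contiguous queue offsets, no
 * min_rate, and per-TC max_rate values that are converted to Mbps above:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel shaper bw_rlimit max_rate 4Gbit 2Gbit
 *
 * A layout that leaves a hole (e.g. queues 4@0 4@5) fails the
 * offset[i] != num_qps check above.
 */
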
3358/**
3359 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3360 * @adapter: board private structure
3361 **/
3362static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3363{
3364        struct iavf_cloud_filter *cf, *cftmp;
3365
3366        spin_lock_bh(&adapter->cloud_filter_list_lock);
3367        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3368                                 list) {
3369                list_del(&cf->list);
3370                kfree(cf);
3371                adapter->num_cloud_filters--;
3372        }
3373        spin_unlock_bh(&adapter->cloud_filter_list_lock);
3374}
3375
3376/**
3377 * __iavf_setup_tc - configure multiple traffic classes
3378 * @netdev: network interface device structure
3379 * @type_data: tc offload data
3380 *
3381 * This function processes the config information provided by the
3382 * user to configure traffic classes/queue channels and packages the
3383 * information to request the PF to setup traffic classes.
3384 *
3385 * Returns 0 on success.
3386 **/
3387static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3388{
3389        struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3390        struct iavf_adapter *adapter = netdev_priv(netdev);
3391        struct virtchnl_vf_resource *vfres = adapter->vf_res;
3392        u8 num_tc = 0, total_qps = 0;
3393        int ret = 0, netdev_tc = 0;
3394        u64 max_tx_rate;
3395        u16 mode;
3396        int i;
3397
3398        num_tc = mqprio_qopt->qopt.num_tc;
3399        mode = mqprio_qopt->mode;
3400
3401        /* delete queue_channel */
3402        if (!mqprio_qopt->qopt.hw) {
3403                if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3404                        /* reset the tc configuration */
3405                        netdev_reset_tc(netdev);
3406                        adapter->num_tc = 0;
3407                        netif_tx_stop_all_queues(netdev);
3408                        netif_tx_disable(netdev);
3409                        iavf_del_all_cloud_filters(adapter);
3410                        adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3411                        goto exit;
3412                } else {
3413                        return -EINVAL;
3414                }
3415        }
3416
3417        /* add queue channel */
3418        if (mode == TC_MQPRIO_MODE_CHANNEL) {
3419                if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3420                        dev_err(&adapter->pdev->dev, "ADq not supported\n");
3421                        return -EOPNOTSUPP;
3422                }
3423                if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3424                        dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3425                        return -EINVAL;
3426                }
3427
3428                ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3429                if (ret)
3430                        return ret;
3431                /* Return if same TC config is requested */
3432                if (adapter->num_tc == num_tc)
3433                        return 0;
3434                adapter->num_tc = num_tc;
3435
3436                for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3437                        if (i < num_tc) {
3438                                adapter->ch_config.ch_info[i].count =
3439                                        mqprio_qopt->qopt.count[i];
3440                                adapter->ch_config.ch_info[i].offset =
3441                                        mqprio_qopt->qopt.offset[i];
3442                                total_qps += mqprio_qopt->qopt.count[i];
3443                                max_tx_rate = mqprio_qopt->max_rate[i];
3444                                /* convert to Mbps */
3445                                max_tx_rate = div_u64(max_tx_rate,
3446                                                      IAVF_MBPS_DIVISOR);
3447                                adapter->ch_config.ch_info[i].max_tx_rate =
3448                                        max_tx_rate;
3449                        } else {
3450                                adapter->ch_config.ch_info[i].count = 1;
3451                                adapter->ch_config.ch_info[i].offset = 0;
3452                        }
3453                }
3454                adapter->ch_config.total_qps = total_qps;
3455                netif_tx_stop_all_queues(netdev);
3456                netif_tx_disable(netdev);
3457                adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3458                netdev_reset_tc(netdev);
3459                /* Report the tc mapping up the stack */
3460                netdev_set_num_tc(adapter->netdev, num_tc);
3461                for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3462                        u16 qcount = mqprio_qopt->qopt.count[i];
3463                        u16 qoffset = mqprio_qopt->qopt.offset[i];
3464
3465                        if (i < num_tc)
3466                                netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3467                                                    qoffset);
3468                }
3469        }
3470exit:
3471        return ret;
3472}
3473
3474/**
3475 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3476 * @adapter: board private structure
3477 * @f: pointer to struct flow_cls_offload
3478 * @filter: pointer to cloud filter structure
3479 */
3480static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3481                                 struct flow_cls_offload *f,
3482                                 struct iavf_cloud_filter *filter)
3483{
3484        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3485        struct flow_dissector *dissector = rule->match.dissector;
3486        u16 n_proto_mask = 0;
3487        u16 n_proto_key = 0;
3488        u8 field_flags = 0;
3489        u16 addr_type = 0;
3490        u16 n_proto = 0;
3491        int i = 0;
3492        struct virtchnl_filter *vf = &filter->f;
3493
3494        if (dissector->used_keys &
3495            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3496              BIT(FLOW_DISSECTOR_KEY_BASIC) |
3497              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3498              BIT(FLOW_DISSECTOR_KEY_VLAN) |
3499              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3500              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3501              BIT(FLOW_DISSECTOR_KEY_PORTS) |
3502              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3503                dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3504                        dissector->used_keys);
3505                return -EOPNOTSUPP;
3506        }
3507
3508        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3509                struct flow_match_enc_keyid match;
3510
3511                flow_rule_match_enc_keyid(rule, &match);
3512                if (match.mask->keyid != 0)
3513                        field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3514        }
3515
3516        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3517                struct flow_match_basic match;
3518
3519                flow_rule_match_basic(rule, &match);
3520                n_proto_key = ntohs(match.key->n_proto);
3521                n_proto_mask = ntohs(match.mask->n_proto);
3522
3523                if (n_proto_key == ETH_P_ALL) {
3524                        n_proto_key = 0;
3525                        n_proto_mask = 0;
3526                }
3527                n_proto = n_proto_key & n_proto_mask;
3528                if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3529                        return -EINVAL;
3530                if (n_proto == ETH_P_IPV6) {
3531                        /* specify flow type as TCP IPv6 */
3532                        vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3533                }
3534
3535                if (match.key->ip_proto != IPPROTO_TCP) {
3536                        dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3537                        return -EINVAL;
3538                }
3539        }
3540
3541        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3542                struct flow_match_eth_addrs match;
3543
3544                flow_rule_match_eth_addrs(rule, &match);
3545
3546                /* use is_broadcast and is_zero to check for all 0xff or all 0 */
3547                if (!is_zero_ether_addr(match.mask->dst)) {
3548                        if (is_broadcast_ether_addr(match.mask->dst)) {
3549                                field_flags |= IAVF_CLOUD_FIELD_OMAC;
3550                        } else {
3551                                dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3552                                        match.mask->dst);
3553                                return -EINVAL;
3554                        }
3555                }
3556
3557                if (!is_zero_ether_addr(match.mask->src)) {
3558                        if (is_broadcast_ether_addr(match.mask->src)) {
3559                                field_flags |= IAVF_CLOUD_FIELD_IMAC;
3560                        } else {
3561                                dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3562                                        match.mask->src);
3563                                return -EINVAL;
3564                        }
3565                }
3566
3567                if (!is_zero_ether_addr(match.key->dst))
3568                        if (is_valid_ether_addr(match.key->dst) ||
3569                            is_multicast_ether_addr(match.key->dst)) {
3570                                /* set the mask if a valid dst_mac address */
3571                                for (i = 0; i < ETH_ALEN; i++)
3572                                        vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3573                                ether_addr_copy(vf->data.tcp_spec.dst_mac,
3574                                                match.key->dst);
3575                        }
3576
3577                if (!is_zero_ether_addr(match.key->src))
3578                        if (is_valid_ether_addr(match.key->src) ||
3579                            is_multicast_ether_addr(match.key->src)) {
3580                                /* set the mask if a valid src_mac address */
3581                                for (i = 0; i < ETH_ALEN; i++)
3582                                        vf->mask.tcp_spec.src_mac[i] |= 0xff;
3583                                ether_addr_copy(vf->data.tcp_spec.src_mac,
3584                                                match.key->src);
3585                        }
3586        }
3587
3588        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3589                struct flow_match_vlan match;
3590
3591                flow_rule_match_vlan(rule, &match);
3592                if (match.mask->vlan_id) {
3593                        if (match.mask->vlan_id == VLAN_VID_MASK) {
3594                                field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3595                        } else {
3596                                dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3597                                        match.mask->vlan_id);
3598                                return -EINVAL;
3599                        }
3600                }
3601                vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3602                vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3603        }
3604
3605        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3606                struct flow_match_control match;
3607
3608                flow_rule_match_control(rule, &match);
3609                addr_type = match.key->addr_type;
3610        }
3611
3612        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3613                struct flow_match_ipv4_addrs match;
3614
3615                flow_rule_match_ipv4_addrs(rule, &match);
3616                if (match.mask->dst) {
3617                        if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3618                                field_flags |= IAVF_CLOUD_FIELD_IIP;
3619                        } else {
3620                                dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3621                                        be32_to_cpu(match.mask->dst));
3622                                return -EINVAL;
3623                        }
3624                }
3625
3626                if (match.mask->src) {
3627                        if (match.mask->src == cpu_to_be32(0xffffffff)) {
3628                                field_flags |= IAVF_CLOUD_FIELD_IIP;
3629                        } else {
3630                                dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3631                                        be32_to_cpu(match.mask->src));
3632                                return -EINVAL;
3633                        }
3634                }
3635
3636                if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3637                        dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3638                        return -EINVAL;
3639                }
3640                if (match.key->dst) {
3641                        vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3642                        vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3643                }
3644                if (match.key->src) {
3645                        vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3646                        vf->data.tcp_spec.src_ip[0] = match.key->src;
3647                }
3648        }
3649
3650        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3651                struct flow_match_ipv6_addrs match;
3652
3653                flow_rule_match_ipv6_addrs(rule, &match);
3654
3655                /* validate mask, make sure it is not IPV6_ADDR_ANY */
3656                if (ipv6_addr_any(&match.mask->dst)) {
3657                        dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3658                                IPV6_ADDR_ANY);
3659                        return -EINVAL;
3660                }
3661
3662                /* src and dest IPv6 address should not be LOOPBACK
3663                 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3664                 */
3665                if (ipv6_addr_loopback(&match.key->dst) ||
3666                    ipv6_addr_loopback(&match.key->src)) {
3667                        dev_err(&adapter->pdev->dev,
3668                                "ipv6 addr should not be loopback\n");
3669                        return -EINVAL;
3670                }
3671                if (!ipv6_addr_any(&match.mask->dst) ||
3672                    !ipv6_addr_any(&match.mask->src))
3673                        field_flags |= IAVF_CLOUD_FIELD_IIP;
3674
3675                for (i = 0; i < 4; i++)
3676                        vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3677                memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3678                       sizeof(vf->data.tcp_spec.dst_ip));
3679                for (i = 0; i < 4; i++)
3680                        vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3681                memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3682                       sizeof(vf->data.tcp_spec.src_ip));
3683        }
3684        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3685                struct flow_match_ports match;
3686
3687                flow_rule_match_ports(rule, &match);
3688                if (match.mask->src) {
3689                        if (match.mask->src == cpu_to_be16(0xffff)) {
3690                                field_flags |= IAVF_CLOUD_FIELD_IIP;
3691                        } else {
3692                                dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3693                                        be16_to_cpu(match.mask->src));
3694                                return -EINVAL;
3695                        }
3696                }
3697
3698                if (match.mask->dst) {
3699                        if (match.mask->dst == cpu_to_be16(0xffff)) {
3700                                field_flags |= IAVF_CLOUD_FIELD_IIP;
3701                        } else {
3702                                dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3703                                        be16_to_cpu(match.mask->dst));
3704                                return -EINVAL;
3705                        }
3706                }
3707                if (match.key->dst) {
3708                        vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3709                        vf->data.tcp_spec.dst_port = match.key->dst;
3710                }
3711
3712                if (match.key->src) {
3713                        vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3714                        vf->data.tcp_spec.src_port = match.key->src;
3715                }
3716        }
3717        vf->field_flags = field_flags;
3718
3719        return 0;
3720}
3721
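/* Example (hypothetical tc invocation) of a filter this parser accepts: an
 * IPv4 TCP flow keyed on an exact destination address and port:
 *
 *	tc filter add dev eth0 protocol ip ingress flower \
 *		dst_ip 192.168.10.1 ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 *
 * Partial masks such as dst_ip 192.168.10.0/24 are rejected above, since
 * only all-ones masks may set the IAVF_CLOUD_FIELD_* flags.
 */
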
3722/**
3723 * iavf_handle_tclass - Forward to a traffic class on the device
3724 * @adapter: board private structure
3725 * @tc: traffic class index on the device
3726 * @filter: pointer to cloud filter structure
3727 */
3728static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3729                              struct iavf_cloud_filter *filter)
3730{
3731        if (tc == 0)
3732                return 0;
3733        if (tc < adapter->num_tc) {
3734                if (!filter->f.data.tcp_spec.dst_port) {
3735                        dev_err(&adapter->pdev->dev,
3736                                "Specify destination port to redirect to traffic class other than TC0\n");
3737                        return -EINVAL;
3738                }
3739        }
3740        /* redirect to a traffic class on the same device */
3741        filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3742        filter->f.action_meta = tc;
3743        return 0;
3744}
3745
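/* Illustrative effect: for the hypothetical hw_tc 1 filter above,
 * iavf_handle_tclass() confirms a destination port is present (required for
 * any TC other than TC0) and then sets:
 *
 *	filter->f.action      = VIRTCHNL_ACTION_TC_REDIRECT;
 *	filter->f.action_meta = 1;
 *
 * so the PF steers matching traffic to traffic class 1.
 */
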
3746/**
3747 * iavf_configure_clsflower - Add tc flower filters
3748 * @adapter: board private structure
3749 * @cls_flower: Pointer to struct flow_cls_offload
3750 */
3751static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3752                                    struct flow_cls_offload *cls_flower)
3753{
3754        int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3755        struct iavf_cloud_filter *filter = NULL;
3756        int err = -EINVAL, count = 50;
3757
3758        if (tc < 0) {
3759                dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3760                return -EINVAL;
3761        }
3762
3763        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3764        if (!filter)
3765                return -ENOMEM;
3766
3767        while (!mutex_trylock(&adapter->crit_lock)) {
3768                if (--count == 0) {
3769                        kfree(filter);
3770                        return err;
3771                }
3772                udelay(1);
3773        }
3774
3775        filter->cookie = cls_flower->cookie;
3776
3777        /* set the mask to all zeroes to begin with */
3778        memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3779        /* start out with flow type and eth type IPv4 to begin with */
3780        filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3781        err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3782        if (err)
3783                goto err;
3784
3785        err = iavf_handle_tclass(adapter, tc, filter);
3786        if (err)
3787                goto err;
3788
3789        /* add filter to the list */
3790        spin_lock_bh(&adapter->cloud_filter_list_lock);
3791        list_add_tail(&filter->list, &adapter->cloud_filter_list);
3792        adapter->num_cloud_filters++;
3793        filter->add = true;
3794        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3795        spin_unlock_bh(&adapter->cloud_filter_list_lock);
3796err:
3797        if (err)
3798                kfree(filter);
3799
3800        mutex_unlock(&adapter->crit_lock);
3801        return err;
3802}
3803
3804/**
 * iavf_find_cf - Find the cloud filter in the list
3805 * @adapter: Board private structure
3806 * @cookie: filter specific cookie
3807 *
3808 * Returns ptr to the filter object or NULL. Must be called while holding the
3809 * cloud_filter_list_lock.
3810 */
3811static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3812                                              unsigned long *cookie)
3813{
3814        struct iavf_cloud_filter *filter = NULL;
3815
3816        if (!cookie)
3817                return NULL;
3818
3819        list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3820                if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3821                        return filter;
3822        }
3823        return NULL;
3824}
3825
3826/**
3827 * iavf_delete_clsflower - Remove tc flower filters
3828 * @adapter: board private structure
3829 * @cls_flower: Pointer to struct flow_cls_offload
3830 */
3831static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3832                                 struct flow_cls_offload *cls_flower)
3833{
3834        struct iavf_cloud_filter *filter = NULL;
3835        int err = 0;
3836
3837        spin_lock_bh(&adapter->cloud_filter_list_lock);
3838        filter = iavf_find_cf(adapter, &cls_flower->cookie);
3839        if (filter) {
3840                filter->del = true;
3841                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3842        } else {
3843                err = -EINVAL;
3844        }
3845        spin_unlock_bh(&adapter->cloud_filter_list_lock);
3846
3847        return err;
3848}
3849
3850/**
3851 * iavf_setup_tc_cls_flower - flower classifier offloads
3852 * @adapter: board private structure
3853 * @cls_flower: pointer to flow_cls_offload struct with flow info
3854 */
3855static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3856                                    struct flow_cls_offload *cls_flower)
3857{
3858        switch (cls_flower->command) {
3859        case FLOW_CLS_REPLACE:
3860                return iavf_configure_clsflower(adapter, cls_flower);
3861        case FLOW_CLS_DESTROY:
3862                return iavf_delete_clsflower(adapter, cls_flower);
3863        case FLOW_CLS_STATS:
3864                return -EOPNOTSUPP;
3865        default:
3866                return -EOPNOTSUPP;
3867        }
3868}
3869
3870/**
3871 * iavf_setup_tc_block_cb - block callback for tc
3872 * @type: type of offload
3873 * @type_data: offload data
3874 * @cb_priv: board private structure (the iavf adapter)
3875 *
3876 * This function is the block callback for traffic classes
3877 **/
3878static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3879                                  void *cb_priv)
3880{
3881        struct iavf_adapter *adapter = cb_priv;
3882
3883        if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3884                return -EOPNOTSUPP;
3885
3886        switch (type) {
3887        case TC_SETUP_CLSFLOWER:
3888                return iavf_setup_tc_cls_flower(cb_priv, type_data);
3889        default:
3890                return -EOPNOTSUPP;
3891        }
3892}
3893
3894static LIST_HEAD(iavf_block_cb_list);
3895
3896/**
3897 * iavf_setup_tc - configure multiple traffic classes
3898 * @netdev: network interface device structure
3899 * @type: type of offload
3900 * @type_data: tc offload data
3901 *
3902 * This function is the callback to ndo_setup_tc in the
3903 * netdev_ops.
3904 *
3905 * Returns 0 on success
3906 **/
3907static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3908                         void *type_data)
3909{
3910        struct iavf_adapter *adapter = netdev_priv(netdev);
3911
3912        switch (type) {
3913        case TC_SETUP_QDISC_MQPRIO:
3914                return __iavf_setup_tc(netdev, type_data);
3915        case TC_SETUP_BLOCK:
3916                return flow_block_cb_setup_simple(type_data,
3917                                                  &iavf_block_cb_list,
3918                                                  iavf_setup_tc_block_cb,
3919                                                  adapter, adapter, true);
3920        default:
3921                return -EOPNOTSUPP;
3922        }
3923}
3924
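/* Wiring summary: iavf_setup_tc() is the driver's .ndo_setup_tc handler, so
 * the offload entry points above are reached as:
 *
 *	TC_SETUP_QDISC_MQPRIO -> __iavf_setup_tc()         (mqprio channels)
 *	TC_SETUP_BLOCK        -> iavf_setup_tc_block_cb()  (flower filters)
 */
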
3925/**
3926 * iavf_open - Called when a network interface is made active
3927 * @netdev: network interface device structure
3928 *
3929 * Returns 0 on success, negative value on failure
3930 *
3931 * The open entry point is called when a network interface is made
3932 * active by the system (IFF_UP).  At this point all resources needed
3933 * for transmit and receive operations are allocated, the interrupt
3934 * handler is registered with the OS, the watchdog is started,
3935 * and the stack is notified that the interface is ready.
3936 **/
3937static int iavf_open(struct net_device *netdev)
3938{
3939        struct iavf_adapter *adapter = netdev_priv(netdev);
3940        int err;
3941
3942        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3943                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3944                return -EIO;
3945        }
3946
3947        while (!mutex_trylock(&adapter->crit_lock))
3948                usleep_range(500, 1000);
3949
3950        if (adapter->state == __IAVF_RUNNING &&
3951            !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
3952                dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
3953                err = 0;
3954                goto err_unlock;
3955        }
3956
3957        if (adapter->state != __IAVF_DOWN) {
3958                err = -EBUSY;
3959                goto err_unlock;
3960        }
3961
3962        /* allocate transmit descriptors */
3963        err = iavf_setup_all_tx_resources(adapter);
3964        if (err)
3965                goto err_setup_tx;
3966
3967        /* allocate receive descriptors */
3968        err = iavf_setup_all_rx_resources(adapter);
3969        if (err)
3970                goto err_setup_rx;
3971
3972        /* clear any pending interrupts, may auto mask */
3973        err = iavf_request_traffic_irqs(adapter, netdev->name);
3974        if (err)
3975                goto err_req_irq;
3976
3977        spin_lock_bh(&adapter->mac_vlan_list_lock);
3978
3979        iavf_add_filter(adapter, adapter->hw.mac.addr);
3980
3981        spin_unlock_bh(&adapter->mac_vlan_list_lock);
3982
3983        /* Restore VLAN filters that were removed with IFF_DOWN */
3984        iavf_restore_filters(adapter);
3985
3986        iavf_configure(adapter);
3987
3988        iavf_up_complete(adapter);
3989
3990        iavf_irq_enable(adapter, true);
3991
3992        mutex_unlock(&adapter->crit_lock);
3993
3994        return 0;
3995
3996err_req_irq:
3997        iavf_down(adapter);
3998        iavf_free_traffic_irqs(adapter);
3999err_setup_rx:
4000        iavf_free_all_rx_resources(adapter);
4001err_setup_tx:
4002        iavf_free_all_tx_resources(adapter);
4003err_unlock:
4004        mutex_unlock(&adapter->crit_lock);
4005
4006        return err;
4007}
4008
4009/**
4010 * iavf_close - Disables a network interface
4011 * @netdev: network interface device structure
4012 *
4013 * Returns 0, this is not allowed to fail
4014 *
4015 * The close entry point is called when an interface is de-activated
4016 * by the OS.  The hardware is still under the drivers control, but
4017 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4018 * are freed, along with all transmit and receive resources.
4019 **/
4020static int iavf_close(struct net_device *netdev)
4021{
4022        struct iavf_adapter *adapter = netdev_priv(netdev);
4023        int status;
4024
4025        mutex_lock(&adapter->crit_lock);
4026
4027        if (adapter->state <= __IAVF_DOWN_PENDING) {
4028                mutex_unlock(&adapter->crit_lock);
4029                return 0;
4030        }
4031
4032        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4033        if (CLIENT_ENABLED(adapter))
4034                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4035
4036        iavf_down(adapter);
4037        iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4038        iavf_free_traffic_irqs(adapter);
4039
4040        mutex_unlock(&adapter->crit_lock);
4041
4042        /* We explicitly don't free resources here because the hardware is
4043         * still active and can DMA into memory. Resources are cleared in
4044         * iavf_virtchnl_completion() after we get confirmation from the PF
4045         * driver that the rings have been stopped.
4046         *
4047         * Also, we wait for state to transition to __IAVF_DOWN before
4048         * returning. State change occurs in iavf_virtchnl_completion() after
4049         * VF resources are released (which occurs after PF driver processes and
4050         * responds to admin queue commands).
4051         */
4052
4053        status = wait_event_timeout(adapter->down_waitqueue,
4054                                    adapter->state == __IAVF_DOWN,
4055                                    msecs_to_jiffies(500));
4056        if (!status)
4057                netdev_warn(netdev, "Device resources not yet released\n");
4058        return 0;
4059}
4060
4061/**
4062 * iavf_change_mtu - Change the Maximum Transfer Unit
4063 * @netdev: network interface device structure
4064 * @new_mtu: new value for maximum frame size
4065 *
4066 * Returns 0 on success, negative on failure
4067 **/
4068static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4069{
4070        struct iavf_adapter *adapter = netdev_priv(netdev);
4071
4072        netdev_dbg(netdev, "changing MTU from %d to %d\n",
4073                   netdev->mtu, new_mtu);
4074        netdev->mtu = new_mtu;
4075        if (CLIENT_ENABLED(adapter)) {
4076                iavf_notify_client_l2_params(&adapter->vsi);
4077                adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4078        }
4079
4080        if (netif_running(netdev)) {
4081                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4082                queue_work(iavf_wq, &adapter->reset_task);
4083        }
4084
4085        return 0;
4086}
4087
4088#define NETIF_VLAN_OFFLOAD_FEATURES     (NETIF_F_HW_VLAN_CTAG_RX | \
4089                                         NETIF_F_HW_VLAN_CTAG_TX | \
4090                                         NETIF_F_HW_VLAN_STAG_RX | \
4091                                         NETIF_F_HW_VLAN_STAG_TX)
4092
4093/**
4094 * iavf_set_features - set the netdev feature flags
4095 * @netdev: ptr to the netdev being adjusted
4096 * @features: the feature set that the stack is suggesting
4097 * Note: expects to be called while under rtnl_lock()
4098 **/
4099static int iavf_set_features(struct net_device *netdev,
4100                             netdev_features_t features)
4101{
4102        struct iavf_adapter *adapter = netdev_priv(netdev);
4103
4104        /* trigger update on any VLAN feature change */
4105        if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4106            (features & NETIF_VLAN_OFFLOAD_FEATURES))
4107                iavf_set_vlan_offload_features(adapter, netdev->features,
4108                                               features);
4109
4110        return 0;
4111}
4112
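/* Example trigger (hypothetical command): toggling VLAN offloads from user
 * space, e.g.
 *
 *	ethtool -K eth0 rxvlan off txvlan on
 *
 * flips NETIF_F_HW_VLAN_CTAG_RX/TX, so the XOR test above fires and the new
 * settings are pushed to the PF via iavf_set_vlan_offload_features().
 */
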
4113/**
4114 * iavf_features_check - Validate encapsulated packet conforms to limits
4115 * @skb: skb buff
4116 * @dev: This physical port's netdev
4117 * @features: Offload features that the stack believes apply
4118 **/
4119static netdev_features_t iavf_features_check(struct sk_buff *skb,
4120                                             struct net_device *dev,
4121                                             netdev_features_t features)
4122{
4123        size_t len;
4124
4125        /* No point in doing any of this if neither checksum nor GSO are
4126         * being requested for this frame.  We can rule out both by just
4127         * checking for CHECKSUM_PARTIAL
4128         */
4129        if (skb->ip_summed != CHECKSUM_PARTIAL)
4130                return features;
4131
4132        /* We cannot support GSO if the MSS is going to be less than
4133         * 64 bytes.  If it is then we need to drop support for GSO.
4134         */
4135        if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4136                features &= ~NETIF_F_GSO_MASK;
4137
4138        /* MACLEN can support at most 63 words */
4139        len = skb_network_header(skb) - skb->data;
4140        if (len & ~(63 * 2))
4141                goto out_err;
4142
4143        /* IPLEN and EIPLEN can support at most 127 dwords */
4144        len = skb_transport_header(skb) - skb_network_header(skb);
4145        if (len & ~(127 * 4))
4146                goto out_err;
4147
4148        if (skb->encapsulation) {
4149                /* L4TUNLEN can support 127 words */
4150                len = skb_inner_network_header(skb) - skb_transport_header(skb);
4151                if (len & ~(127 * 2))
4152                        goto out_err;
4153
4154                /* IPLEN can support at most 127 dwords */
4155                len = skb_inner_transport_header(skb) -
4156                      skb_inner_network_header(skb);
4157                if (len & ~(127 * 4))
4158                        goto out_err;
4159        }
4160
4161        /* No need to validate L4LEN as TCP is the only protocol with a
4162         * flexible value and we support all possible values supported
4163         * by TCP, which is at most 15 dwords
4164         */
4165
4166        return features;
4167out_err:
4168        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4169}
4170
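/* Worked example (illustrative): a plain TCP/IPv4 frame has a 14-byte MAC
 * header, and 14 & ~(63 * 2) == 0, so it passes the MACLEN check; the masks
 * encode hardware limits of 63 words (126 bytes) for MACLEN and 127 dwords
 * (508 bytes) for IPLEN/EIPLEN.  An IPv6 extension-header chain pushing the
 * L3 header past 508 bytes would take the out_err path and lose
 * checksum/GSO offload for that frame.
 */
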
4171/**
4172 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4173 * @adapter: board private structure
4174 *
4175 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4176 * were negotiated determine the VLAN features that can be toggled on and off.
4177 **/
4178static netdev_features_t
4179iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4180{
4181        netdev_features_t hw_features = 0;
4182
4183        if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4184                return hw_features;
4185
4186        /* Enable VLAN features if supported */
4187        if (VLAN_ALLOWED(adapter)) {
4188                hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4189                                NETIF_F_HW_VLAN_CTAG_RX);
4190        } else if (VLAN_V2_ALLOWED(adapter)) {
4191                struct virtchnl_vlan_caps *vlan_v2_caps =
4192                        &adapter->vlan_v2_caps;
4193                struct virtchnl_vlan_supported_caps *stripping_support =
4194                        &vlan_v2_caps->offloads.stripping_support;
4195                struct virtchnl_vlan_supported_caps *insertion_support =
4196                        &vlan_v2_caps->offloads.insertion_support;
4197
4198                if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4199                    stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4200                        if (stripping_support->outer &
4201                            VIRTCHNL_VLAN_ETHERTYPE_8100)
4202                                hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4203                        if (stripping_support->outer &
4204                            VIRTCHNL_VLAN_ETHERTYPE_88A8)
4205                                hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4206                } else if (stripping_support->inner !=
4207                           VIRTCHNL_VLAN_UNSUPPORTED &&
4208                           stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4209                        if (stripping_support->inner &
4210                            VIRTCHNL_VLAN_ETHERTYPE_8100)
4211                                hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4212                }
4213
4214                if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4215                    insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4216                        if (insertion_support->outer &
4217                            VIRTCHNL_VLAN_ETHERTYPE_8100)
4218                                hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4219                        if (insertion_support->outer &
4220                            VIRTCHNL_VLAN_ETHERTYPE_88A8)
4221                                hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4222                } else if (insertion_support->inner != VIRTCHNL_VLAN_UNSUPPORTED &&
4223                           insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4224                        if (insertion_support->inner &
4225                            VIRTCHNL_VLAN_ETHERTYPE_8100)
4226                                hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4227                }
4228        }
4229
4230        return hw_features;
4231}
4232
4233/**
4234 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4235 * @adapter: board private structure
4236 *
4237 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4238 * were negotiated determine the VLAN features that are enabled by default.
4239 **/
4240static netdev_features_t
4241iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4242{
4243        netdev_features_t features = 0;
4244
4245        if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4246                return features;
4247
4248        if (VLAN_ALLOWED(adapter)) {
4249                features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4250                        NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4251        } else if (VLAN_V2_ALLOWED(adapter)) {
4252                struct virtchnl_vlan_caps *vlan_v2_caps =
4253                        &adapter->vlan_v2_caps;
4254                struct virtchnl_vlan_supported_caps *filtering_support =
4255                        &vlan_v2_caps->filtering.filtering_support;
4256                struct virtchnl_vlan_supported_caps *stripping_support =
4257                        &vlan_v2_caps->offloads.stripping_support;
4258                struct virtchnl_vlan_supported_caps *insertion_support =
4259                        &vlan_v2_caps->offloads.insertion_support;
4260                u32 ethertype_init;
4261
4262                /* give priority to outer stripping and don't support both outer
4263                 * and inner stripping
4264                 */
4265                ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4266                if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4267                        if (stripping_support->outer &
4268                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4269                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4270                                features |= NETIF_F_HW_VLAN_CTAG_RX;
4271                        else if (stripping_support->outer &
4272                                 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4273                                 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4274                                features |= NETIF_F_HW_VLAN_STAG_RX;
4275                } else if (stripping_support->inner !=
4276                           VIRTCHNL_VLAN_UNSUPPORTED) {
4277                        if (stripping_support->inner &
4278                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4279                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4280                                features |= NETIF_F_HW_VLAN_CTAG_RX;
4281                }
4282
4283                /* give priority to outer insertion and don't support both outer
4284                 * and inner insertion
4285                 */
4286                if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4287                        if (insertion_support->outer &
4288                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4289                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4290                                features |= NETIF_F_HW_VLAN_CTAG_TX;
4291                        else if (insertion_support->outer &
4292                                 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4293                                 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4294                                features |= NETIF_F_HW_VLAN_STAG_TX;
4295                } else if (insertion_support->inner !=
4296                           VIRTCHNL_VLAN_UNSUPPORTED) {
4297                        if (insertion_support->inner &
4298                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4299                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4300                                features |= NETIF_F_HW_VLAN_CTAG_TX;
4301                }
4302
4303                /* give priority to outer filtering and don't bother if both
4304                 * outer and inner filtering are enabled
4305                 */
4306                ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4307                if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4308                        if (filtering_support->outer &
4309                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4310                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4311                                features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4312                        if (filtering_support->outer &
4313                            VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4314                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4315                                features |= NETIF_F_HW_VLAN_STAG_FILTER;
4316                } else if (filtering_support->inner !=
4317                           VIRTCHNL_VLAN_UNSUPPORTED) {
4318                        if (filtering_support->inner &
4319                            VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4320                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4321                                features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4322                        if (filtering_support->inner &
4323                            VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4324                            ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4325                                features |= NETIF_F_HW_VLAN_STAG_FILTER;
4326                }
4327        }
4328
4329        return features;
4330}
4331
4332#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4333        (!(((requested) & (feature_bit)) && \
4334           !((allowed) & (feature_bit))))
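
/* A minimal sketch of the macro's behavior (illustrative values): the only
 * combination it rejects is "requested but not allowed".
 *
 *	IAVF_NETDEV_VLAN_FEATURE_ALLOWED(NETIF_F_HW_VLAN_CTAG_TX, 0,
 *					 NETIF_F_HW_VLAN_CTAG_TX)	-> false
 *	IAVF_NETDEV_VLAN_FEATURE_ALLOWED(0, 0,
 *					 NETIF_F_HW_VLAN_CTAG_TX)	-> true
 *	IAVF_NETDEV_VLAN_FEATURE_ALLOWED(NETIF_F_HW_VLAN_CTAG_TX,
 *					 NETIF_F_HW_VLAN_CTAG_TX,
 *					 NETIF_F_HW_VLAN_CTAG_TX)	-> true
 */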
4335
4336/**
4337 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4338 * @adapter: board private structure
4339 * @requested_features: stack requested NETDEV features
4340 **/
4341static netdev_features_t
4342iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4343                              netdev_features_t requested_features)
4344{
4345        netdev_features_t allowed_features;
4346
4347        allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4348                iavf_get_netdev_vlan_features(adapter);
4349
4350        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4351                                              allowed_features,
4352                                              NETIF_F_HW_VLAN_CTAG_TX))
4353                requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4354
4355        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4356                                              allowed_features,
4357                                              NETIF_F_HW_VLAN_CTAG_RX))
4358                requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4359
4360        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4361                                              allowed_features,
4362                                              NETIF_F_HW_VLAN_STAG_TX))
4363                requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4364        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4365                                              allowed_features,
4366                                              NETIF_F_HW_VLAN_STAG_RX))
4367                requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4368
4369        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4370                                              allowed_features,
4371                                              NETIF_F_HW_VLAN_CTAG_FILTER))
4372                requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4373
4374        if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4375                                              allowed_features,
4376                                              NETIF_F_HW_VLAN_STAG_FILTER))
4377                requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4378
4379        if ((requested_features &
4380             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4381            (requested_features &
4382             (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4383            adapter->vlan_v2_caps.offloads.ethertype_match ==
4384            VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4385                netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4386                requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4387                                        NETIF_F_HW_VLAN_STAG_TX);
4388        }
4389
4390        return requested_features;
4391}
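
/* Worked example (hypothetical caps, illustrative only): with
 * offloads.ethertype_match == VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION
 * and both tag types otherwise allowed, requesting CTAG and STAG offloads
 * at the same time keeps CTAG and drops STAG:
 *
 *	netdev_features_t req = NETIF_F_HW_VLAN_CTAG_RX |
 *				NETIF_F_HW_VLAN_STAG_RX;
 *
 *	req = iavf_fix_netdev_vlan_features(adapter, req);
 *	// req == NETIF_F_HW_VLAN_CTAG_RX, STAG bits cleared with a warning
 */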
4392
4393/**
4394 * iavf_fix_features - fix up the netdev feature bits
4395 * @netdev: our net device
4396 * @features: desired feature bits
4397 *
4398 * Returns fixed-up feature bits
4399 **/
4400static netdev_features_t iavf_fix_features(struct net_device *netdev,
4401                                           netdev_features_t features)
4402{
4403        struct iavf_adapter *adapter = netdev_priv(netdev);
4404
4405        return iavf_fix_netdev_vlan_features(adapter, features);
4406}
4407
4408static const struct net_device_ops iavf_netdev_ops = {
4409        .ndo_open               = iavf_open,
4410        .ndo_stop               = iavf_close,
4411        .ndo_start_xmit         = iavf_xmit_frame,
4412        .ndo_set_rx_mode        = iavf_set_rx_mode,
4413        .ndo_validate_addr      = eth_validate_addr,
4414        .ndo_set_mac_address    = iavf_set_mac,
4415        .ndo_change_mtu         = iavf_change_mtu,
4416        .ndo_tx_timeout         = iavf_tx_timeout,
4417        .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
4418        .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
4419        .ndo_features_check     = iavf_features_check,
4420        .ndo_fix_features       = iavf_fix_features,
4421        .ndo_set_features       = iavf_set_features,
4422        .ndo_setup_tc           = iavf_setup_tc,
4423};
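
/* Note: the core invokes .ndo_fix_features followed by .ndo_set_features
 * from netdev_update_features(), e.g. when an administrator toggles an
 * offload:
 *
 *	# ethtool -K <iface> rxvlan off
 *
 * so iavf_fix_features() gets a chance to veto unsupported VLAN feature
 * combinations before iavf_set_features() programs the device.
 */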
4424
4425/**
4426 * iavf_check_reset_complete - check that VF reset is complete
4427 * @hw: pointer to hw struct
4428 *
4429 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4430 **/
4431static int iavf_check_reset_complete(struct iavf_hw *hw)
4432{
4433        u32 rstat;
4434        int i;
4435
4436        for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4437                rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4438                             IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4439                if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4440                    (rstat == VIRTCHNL_VFR_COMPLETED))
4441                        return 0;
4442                usleep_range(10, 20);
4443        }
4444        return -EBUSY;
4445}
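
/* Typical call site (sketch, hypothetical error handling): init paths poll
 * this helper before issuing admin queue commands, e.g.
 *
 *	if (iavf_check_reset_complete(hw)) {
 *		dev_err(&pdev->dev, "VF reset never completed\n");
 *		return -EBUSY;
 *	}
 */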
4446
4447/**
4448 * iavf_process_config - Process the config information we got from the PF
4449 * @adapter: board private structure
4450 *
4451 * Verify that we have a valid config struct, and set up our netdev features
4452 * and our VSI struct.
4453 **/
4454int iavf_process_config(struct iavf_adapter *adapter)
4455{
4456        struct virtchnl_vf_resource *vfres = adapter->vf_res;
4457        netdev_features_t hw_vlan_features, vlan_features;
4458        struct net_device *netdev = adapter->netdev;
4459        netdev_features_t hw_enc_features;
4460        netdev_features_t hw_features;
4461
4462        hw_enc_features = NETIF_F_SG                    |
4463                          NETIF_F_IP_CSUM               |
4464                          NETIF_F_IPV6_CSUM             |
4465                          NETIF_F_HIGHDMA               |
4466                          NETIF_F_SOFT_FEATURES         |
4467                          NETIF_F_TSO                   |
4468                          NETIF_F_TSO_ECN               |
4469                          NETIF_F_TSO6                  |
4470                          NETIF_F_SCTP_CRC              |
4471                          NETIF_F_RXHASH                |
4472                          NETIF_F_RXCSUM                |
4473                          0;
4474
4475        /* advertise to stack only if offloads for encapsulated packets are
4476         * supported
4477         */
4478        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4479                hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
4480                                   NETIF_F_GSO_GRE              |
4481                                   NETIF_F_GSO_GRE_CSUM         |
4482                                   NETIF_F_GSO_IPXIP4           |
4483                                   NETIF_F_GSO_IPXIP6           |
4484                                   NETIF_F_GSO_UDP_TUNNEL_CSUM  |
4485                                   NETIF_F_GSO_PARTIAL          |
4486                                   0;
4487
4488                if (!(vfres->vf_cap_flags &
4489                      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4490                        netdev->gso_partial_features |=
4491                                NETIF_F_GSO_UDP_TUNNEL_CSUM;
4492
4493                netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4494                netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4495                netdev->hw_enc_features |= hw_enc_features;
4496        }
4497        /* record features VLANs can make use of */
4498        netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4499
4500        /* Write features and hw_features separately to avoid polluting
4501         * with, or dropping, features that are set when we registered.
4502         */
4503        hw_features = hw_enc_features;
4504
4505        /* get HW VLAN features that can be toggled */
4506        hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4507
4508        /* Enable cloud filter if ADQ is supported */
4509        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4510                hw_features |= NETIF_F_HW_TC;
4511        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4512                hw_features |= NETIF_F_GSO_UDP_L4;
4513
4514        netdev->hw_features |= hw_features | hw_vlan_features;
4515        vlan_features = iavf_get_netdev_vlan_features(adapter);
4516
4517        netdev->features |= hw_features | vlan_features;
4518
4519        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4520                netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4521
4522        netdev->priv_flags |= IFF_UNICAST_FLT;
4523
4524        /* Do not turn on offloads when they are requested to be turned off.
4525         * TSO needs an MTU of at least 576 bytes to work correctly.
4526         */
4527        if (netdev->wanted_features) {
4528                if (!(netdev->wanted_features & NETIF_F_TSO) ||
4529                    netdev->mtu < 576)
4530                        netdev->features &= ~NETIF_F_TSO;
4531                if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4532                    netdev->mtu < 576)
4533                        netdev->features &= ~NETIF_F_TSO6;
4534                if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4535                        netdev->features &= ~NETIF_F_TSO_ECN;
4536                if (!(netdev->wanted_features & NETIF_F_GRO))
4537                        netdev->features &= ~NETIF_F_GRO;
4538                if (!(netdev->wanted_features & NETIF_F_GSO))
4539                        netdev->features &= ~NETIF_F_GSO;
4540        }
4541
4542        return 0;
4543}
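
/* Note (illustrative): netdev->wanted_features is the feature set most
 * recently requested from user space, so the checks above keep a feature
 * the user explicitly disabled, e.g. via
 *
 *	# ethtool -K <iface> tso off
 *
 * from being silently re-enabled when the VF config is (re)processed.
 */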
4544
4545/**
4546 * iavf_shutdown - Shutdown the device in preparation for a reboot
4547 * @pdev: pci device structure
4548 **/
4549static void iavf_shutdown(struct pci_dev *pdev)
4550{
4551        struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4552        struct net_device *netdev = adapter->netdev;
4553
4554        netif_device_detach(netdev);
4555
4556        if (netif_running(netdev))
4557                iavf_close(netdev);
4558
4559        if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4560                dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4561        /* Prevent the watchdog from running. */
4562        iavf_change_state(adapter, __IAVF_REMOVE);
4563        adapter->aq_required = 0;
4564        mutex_unlock(&adapter->crit_lock);
4565
4566#ifdef CONFIG_PM
4567        pci_save_state(pdev);
4568
4569#endif
4570        pci_disable_device(pdev);
4571}
4572
4573/**
4574 * iavf_probe - Device Initialization Routine
4575 * @pdev: PCI device information struct
4576 * @ent: entry in iavf_pci_tbl
4577 *
4578 * Returns 0 on success, negative on failure
4579 *
4580 * iavf_probe initializes an adapter identified by a pci_dev structure.
4581 * The OS initialization, configuring of the adapter private structure,
4582 * and a hardware reset occur.
4583 **/
4584static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4585{
4586        struct net_device *netdev;
4587        struct iavf_adapter *adapter = NULL;
4588        struct iavf_hw *hw = NULL;
4589        int err;
4590
4591        err = pci_enable_device(pdev);
4592        if (err)
4593                return err;
4594
4595        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4596        if (err) {
4597                dev_err(&pdev->dev,
4598                        "DMA configuration failed: 0x%x\n", err);
4599                goto err_dma;
4600        }
4601
4602        err = pci_request_regions(pdev, iavf_driver_name);
4603        if (err) {
4604                dev_err(&pdev->dev,
4605                        "pci_request_regions failed 0x%x\n", err);
4606                goto err_pci_reg;
4607        }
4608
4609        pci_enable_pcie_error_reporting(pdev);
4610
4611        pci_set_master(pdev);
4612
4613        netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4614                                   IAVF_MAX_REQ_QUEUES);
4615        if (!netdev) {
4616                err = -ENOMEM;
4617                goto err_alloc_etherdev;
4618        }
4619
4620        SET_NETDEV_DEV(netdev, &pdev->dev);
4621
4622        pci_set_drvdata(pdev, netdev);
4623        adapter = netdev_priv(netdev);
4624
4625        adapter->netdev = netdev;
4626        adapter->pdev = pdev;
4627
4628        hw = &adapter->hw;
4629        hw->back = adapter;
4630
4631        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4632        iavf_change_state(adapter, __IAVF_STARTUP);
4633
4634        /* Call save state here because it relies on the adapter struct. */
4635        pci_save_state(pdev);
4636
4637        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4638                              pci_resource_len(pdev, 0));
4639        if (!hw->hw_addr) {
4640                err = -EIO;
4641                goto err_ioremap;
4642        }
4643        hw->vendor_id = pdev->vendor;
4644        hw->device_id = pdev->device;
4645        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4646        hw->subsystem_vendor_id = pdev->subsystem_vendor;
4647        hw->subsystem_device_id = pdev->subsystem_device;
4648        hw->bus.device = PCI_SLOT(pdev->devfn);
4649        hw->bus.func = PCI_FUNC(pdev->devfn);
4650        hw->bus.bus_id = pdev->bus->number;
4651
4652        /* set up the locks for the AQ, do this only once in probe
4653         * and destroy them only once in remove
4654         */
4655        mutex_init(&adapter->crit_lock);
4656        mutex_init(&adapter->client_lock);
4657        mutex_init(&hw->aq.asq_mutex);
4658        mutex_init(&hw->aq.arq_mutex);
4659
4660        spin_lock_init(&adapter->mac_vlan_list_lock);
4661        spin_lock_init(&adapter->cloud_filter_list_lock);
4662        spin_lock_init(&adapter->fdir_fltr_lock);
4663        spin_lock_init(&adapter->adv_rss_lock);
4664
4665        INIT_LIST_HEAD(&adapter->mac_filter_list);
4666        INIT_LIST_HEAD(&adapter->vlan_filter_list);
4667        INIT_LIST_HEAD(&adapter->cloud_filter_list);
4668        INIT_LIST_HEAD(&adapter->fdir_list_head);
4669        INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4670
4671        INIT_WORK(&adapter->reset_task, iavf_reset_task);
4672        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4673        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4674        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4675        queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4676                           msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4677
4678        /* Set up the wait queue for indicating transition to down status */
4679        init_waitqueue_head(&adapter->down_waitqueue);
4680
4681        return 0;
4682
4683err_ioremap:
4684        free_netdev(netdev);
4685err_alloc_etherdev:
4686        pci_disable_pcie_error_reporting(pdev);
4687        pci_release_regions(pdev);
4688err_pci_reg:
4689err_dma:
4690        pci_disable_device(pdev);
4691        return err;
4692}
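
/* The error labels above follow the usual kernel unwind idiom: each label
 * releases only what was acquired before the failing step, in reverse
 * order. For instance (sketch), if ioremap() fails:
 *
 *	goto err_ioremap;	// -> free_netdev()
 *				// -> pci_disable_pcie_error_reporting()
 *				// -> pci_release_regions()
 *				// -> pci_disable_device()
 */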
4693
4694/**
4695 * iavf_suspend - Power management suspend routine
4696 * @dev_d: device info pointer
4697 *
4698 * Called when the system (VM) is entering sleep/suspend.
4699 **/
4700static int __maybe_unused iavf_suspend(struct device *dev_d)
4701{
4702        struct net_device *netdev = dev_get_drvdata(dev_d);
4703        struct iavf_adapter *adapter = netdev_priv(netdev);
4704
4705        netif_device_detach(netdev);
4706
4707        while (!mutex_trylock(&adapter->crit_lock))
4708                usleep_range(500, 1000);
4709
4710        if (netif_running(netdev)) {
4711                rtnl_lock();
4712                iavf_down(adapter);
4713                rtnl_unlock();
4714        }
4715        iavf_free_misc_irq(adapter);
4716        iavf_reset_interrupt_capability(adapter);
4717
4718        mutex_unlock(&adapter->crit_lock);
4719
4720        return 0;
4721}
4722
4723/**
4724 * iavf_resume - Power management resume routine
4725 * @dev_d: device info pointer
4726 *
4727 * Called when the system (VM) is resumed from sleep/suspend.
4728 **/
4729static int __maybe_unused iavf_resume(struct device *dev_d)
4730{
4731        struct pci_dev *pdev = to_pci_dev(dev_d);
4732        struct iavf_adapter *adapter;
4733        int err;
4734
4735        adapter = iavf_pdev_to_adapter(pdev);
4736
4737        pci_set_master(pdev);
4738
4739        rtnl_lock();
4740        err = iavf_set_interrupt_capability(adapter);
4741        if (err) {
4742                rtnl_unlock();
4743                dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
4744                return err;
4745        }
4746        err = iavf_request_misc_irq(adapter);
4747        rtnl_unlock();
4748        if (err) {
4749                dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
4750                return err;
4751        }
4752
4753        queue_work(iavf_wq, &adapter->reset_task);
4754
4755        netif_device_attach(adapter->netdev);
4756
4757        return err;
4758}
4759
4760/**
4761 * iavf_remove - Device Removal Routine
4762 * @pdev: PCI device information struct
4763 *
4764 * iavf_remove is called by the PCI subsystem to alert the driver
4765 * that it should release a PCI device.  This could be caused by a
4766 * Hot-Plug event, or because the driver is going to be removed from
4767 * memory.
4768 **/
4769static void iavf_remove(struct pci_dev *pdev)
4770{
4771        struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4772        struct net_device *netdev = adapter->netdev;
4773        struct iavf_fdir_fltr *fdir, *fdirtmp;
4774        struct iavf_vlan_filter *vlf, *vlftmp;
4775        struct iavf_adv_rss *rss, *rsstmp;
4776        struct iavf_mac_filter *f, *ftmp;
4777        struct iavf_cloud_filter *cf, *cftmp;
4778        struct iavf_hw *hw = &adapter->hw;
4779        int err;
4780
4781        /* When a reboot/shutdown is in progress there is no need to do
4782         * anything as the adapter is already in the __IAVF_REMOVE state
4783         * that was set during the iavf_shutdown() callback.
4784         */
4785        if (adapter->state == __IAVF_REMOVE)
4786                return;
4787
4788        set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
4789        /* Wait until port initialization is complete.
4790         * There are flows where register/unregister netdev may race.
4791         */
4792        while (1) {
4793                mutex_lock(&adapter->crit_lock);
4794                if (adapter->state == __IAVF_RUNNING ||
4795                    adapter->state == __IAVF_DOWN ||
4796                    adapter->state == __IAVF_INIT_FAILED) {
4797                        mutex_unlock(&adapter->crit_lock);
4798                        break;
4799                }
4800
4801                mutex_unlock(&adapter->crit_lock);
4802                usleep_range(500, 1000);
4803        }
4804        cancel_delayed_work_sync(&adapter->watchdog_task);
4805
4806        if (adapter->netdev_registered) {
4807                rtnl_lock();
4808                unregister_netdevice(netdev);
4809                adapter->netdev_registered = false;
4810                rtnl_unlock();
4811        }
4812        if (CLIENT_ALLOWED(adapter)) {
4813                err = iavf_lan_del_device(adapter);
4814                if (err)
4815                        dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
4816                                 err);
4817        }
4818
4819        mutex_lock(&adapter->crit_lock);
4820        dev_info(&adapter->pdev->dev, "Remove device\n");
4821        iavf_change_state(adapter, __IAVF_REMOVE);
4822
4823        iavf_request_reset(adapter);
4824        msleep(50);
4825        /* If the FW isn't responding, kick it once, but only once. */
4826        if (!iavf_asq_done(hw)) {
4827                iavf_request_reset(adapter);
4828                msleep(50);
4829        }
4830
4831        iavf_misc_irq_disable(adapter);
4832        /* Shut down all the garbage mashers on the detention level */
4833        cancel_work_sync(&adapter->reset_task);
4834        cancel_delayed_work_sync(&adapter->watchdog_task);
4835        cancel_work_sync(&adapter->adminq_task);
4836        cancel_delayed_work_sync(&adapter->client_task);
4837
4838        adapter->aq_required = 0;
4839        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
4840
4841        iavf_free_all_tx_resources(adapter);
4842        iavf_free_all_rx_resources(adapter);
4843        iavf_free_misc_irq(adapter);
4844
4845        iavf_reset_interrupt_capability(adapter);
4846        iavf_free_q_vectors(adapter);
4847
4848        iavf_free_rss(adapter);
4849
4850        if (hw->aq.asq.count)
4851                iavf_shutdown_adminq(hw);
4852
4853        /* destroy the locks only once, here */
4854        mutex_destroy(&hw->aq.arq_mutex);
4855        mutex_destroy(&hw->aq.asq_mutex);
4856        mutex_destroy(&adapter->client_lock);
4857        mutex_unlock(&adapter->crit_lock);
4858        mutex_destroy(&adapter->crit_lock);
4859
4860        iounmap(hw->hw_addr);
4861        pci_release_regions(pdev);
4862        iavf_free_queues(adapter);
4863        kfree(adapter->vf_res);
4864        spin_lock_bh(&adapter->mac_vlan_list_lock);
4865        /* If we got removed before an up/down sequence, we've got a filter
4866         * hanging out there that we need to get rid of.
4867         */
4868        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
4869                list_del(&f->list);
4870                kfree(f);
4871        }
4872        list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
4873                                 list) {
4874                list_del(&vlf->list);
4875                kfree(vlf);
4876        }
4877
4878        spin_unlock_bh(&adapter->mac_vlan_list_lock);
4879
4880        spin_lock_bh(&adapter->cloud_filter_list_lock);
4881        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
4882                list_del(&cf->list);
4883                kfree(cf);
4884        }
4885        spin_unlock_bh(&adapter->cloud_filter_list_lock);
4886
4887        spin_lock_bh(&adapter->fdir_fltr_lock);
4888        list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4889                list_del(&fdir->list);
4890                kfree(fdir);
4891        }
4892        spin_unlock_bh(&adapter->fdir_fltr_lock);
4893
4894        spin_lock_bh(&adapter->adv_rss_lock);
4895        list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4896                                 list) {
4897                list_del(&rss->list);
4898                kfree(rss);
4899        }
4900        spin_unlock_bh(&adapter->adv_rss_lock);
4901
4902        free_netdev(netdev);
4903
4904        pci_disable_pcie_error_reporting(pdev);
4905
4906        pci_disable_device(pdev);
4907}
4908
4909static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
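
/* SIMPLE_DEV_PM_OPS() binds iavf_suspend/iavf_resume to all system-sleep
 * transitions (suspend/resume, freeze/thaw, poweroff/restore); it is
 * roughly equivalent to writing (sketch of the expansion):
 *
 *	static const struct dev_pm_ops iavf_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(iavf_suspend, iavf_resume)
 *	};
 */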
4910
4911static struct pci_driver iavf_driver = {
4912        .name      = iavf_driver_name,
4913        .id_table  = iavf_pci_tbl,
4914        .probe     = iavf_probe,
4915        .remove    = iavf_remove,
4916        .driver.pm = &iavf_pm_ops,
4917        .shutdown  = iavf_shutdown,
4918};
4919
4920/**
4921 * iavf_init_module - Driver Registration Routine
4922 *
4923 * iavf_init_module is the first routine called when the driver is
4924 * loaded. All it does is register with the PCI subsystem.
4925 **/
4926static int __init iavf_init_module(void)
4927{
4928        pr_info("iavf: %s\n", iavf_driver_string);
4929
4930        pr_info("%s\n", iavf_copyright);
4931
4932        iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4933                                  iavf_driver_name);
4934        if (!iavf_wq) {
4935                pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4936                return -ENOMEM;
4937        }
4938        return pci_register_driver(&iavf_driver);
4939}
4940
4941module_init(iavf_init_module);
4942
4943/**
4944 * iavf_exit_module - Driver Exit Cleanup Routine
4945 *
4946 * iavf_exit_module is called just before the driver is removed
4947 * from memory.
4948 **/
4949static void __exit iavf_exit_module(void)
4950{
4951        pci_unregister_driver(&iavf_driver);
4952        destroy_workqueue(iavf_wq);
4953}
4954
4955module_exit(iavf_exit_module);
4956
4957/* iavf_main.c */
4958