linux/drivers/net/ethernet/cavium/liquidio/lio_main.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\", which uses firmware from flash if present, else loads \"nic\")");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * octeon_console_debug_enabled - determine if a given console has debug enabled
 * @console: console to check
 * Return: 1 if enabled, 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}

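/*
 * For illustration (hypothetical invocation, not part of the driver):
 * loading the module with console_bitmask=0x5 redirects debug output of
 * consoles 0 and 2 to syslog, so the helper above would report:
 *
 *   octeon_console_debug_enabled(0) -> 1
 *   octeon_console_debug_enabled(1) -> 0
 *   octeon_console_debug_enabled(2) -> 1
 */
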
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* Runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
/* Send the host's local time to the Octeon firmware every 60 seconds so
 * that firmware and host use the same time reference, making it easy to
 * correlate firmware-logged events/errors with host events when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST   msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

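/*
 * Illustrative sketch (assumed example values): on a little-endian host the
 * layout above places gso_segs in bits 32..47 and gso_size in bits 48..63
 * of the u64 view, e.g.:
 *
 *   union tx_info txi = { .u64 = 0 };
 *   txi.s.gso_size = 1448;   // MSS
 *   txi.s.gso_segs = 4;      // number of segments
 *   // txi.u64 == ((u64)1448 << 48) | ((u64)4 << 32)
 */
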
/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

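/*
 * Sketch of the intended two-stage handshake (inferred from how these
 * fields are used in this file, not a spec): probe/init code completes
 * ->init and sets init_ok once octeon_device_init() has run, and a later
 * stage completes ->started and sets started_ok once the NIC application
 * on the card is up; waiters block on the two completions in that order.
 */
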
#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(struct tasklet_struct *t)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
                                                          droq_tasklet);
        struct octeon_device *oct = oct_priv->dev;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

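/*
 * Note on the magic value written above (register layout assumed from the
 * CN23XX output-queue setup code): 0x5700000040ULL packs a time threshold
 * of 0x57 into the upper 32 bits of PKT_INT_LEVELS and a packet-count
 * threshold of 0x40 (64 packets) into the lower 32 bits.
 */
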
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

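/*
 * Worked example (hypothetical register values): if PCI_ERR_UNCOR_STATUS
 * reads 0x00004020 and PCI_ERR_UNCOR_SEVER (the fatal-severity mask) reads
 * 0x00004000, then in the pci_channel_io_normal case status &= ~mask keeps
 * 0x00000020, so only the nonfatal bit is written back (and thus cleared,
 * the status register being write-1-to-clear); otherwise status &= mask
 * keeps 0x00004000 and only the fatal bit is cleared.
 */
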
/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return DISCONNECT. There is no support for recovery, only
         * for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
        /* We should never hit this, since we never request a reset for a
         * fatal error; error_detected above always returns DISCONNECT.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
        /* We should never hit this, since we never request a reset for a
         * fatal error; error_detected above always returns DISCONNECT.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
        /* Nothing to be done here. */
}

#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */
        .driver.pm      = &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int numqs = lio->netdev->real_num_tx_queues;
        int ret_val = 0;
        int q, iq;

        /* check each sub-queue state */
        for (q = 0; q < numqs; q++) {
                iq = lio->linfo.txpciq[q %
                        lio->oct_dev->num_iqs].s.q_no;
                if (octnet_iq_is_full(lio->oct_dev, iq))
                        continue;
                if (__netif_subqueue_stopped(lio->netdev, q)) {
                        netif_wake_subqueue(lio->netdev, q);
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                  tx_restart, 1);
                        ret_val++;
                }
        }

        return ret_val;
}

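/*
 * Sketch of the sub-queue to input-queue mapping used above (assumed
 * example counts): with real_num_tx_queues == 8 and num_iqs == 4, netdev
 * sub-queues 0..7 wrap onto txpciq[0..3] twice, i.e. iq = txpciq[q % 4],
 * so multiple netdev queues can share one device input queue.
 */
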
/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * octnet_link_status_change - Apply a reduced max MTU to the interface
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains the max MTU of the lio
         * interface. This work is scheduled only when the new max MTU of
         * the interface is smaller than the current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Set up the link status change work queue
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
                __func__, lio->linfo.link.u64, ls->u64);
        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }
                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
                                   current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->max_mtu = lio->linfo.link.s.mtu;
                }
                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

/**
 * lio_sync_octeon_time - send the latest local time to octeon firmware so
 * that firmware can correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct timespec64 ts;
        struct lio_time *lt;
        int ret;

        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: soft command allocation failed\n");
                return;
        }

        lt = (struct lio_time *)sc->virtdptr;

        /* Get time of the day */
        ktime_get_real_ts64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
        } else {
                WRITE_ONCE(sc->caller_is_done, true);
        }

        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->sync_octeon_time_wq.wq =
                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
        if (!lio->sync_octeon_time_wq.wq) {
                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
                          lio_sync_octeon_time);
        lio->sync_octeon_time_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

        return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

        if (time_wq->wq) {
                cancel_delayed_work_sync(&time_wq->wk.work);
                destroy_workqueue(time_wq->wq);
        }
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

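/*
 * Example (hypothetical topology): the two PFs of a dual-PF CN23XX adapter
 * typically enumerate as 0000:03:00.0 and 0000:03:00.1, i.e. same bus
 * (0x03) and slot (0x00) but different function, so the check above pairs
 * them; a device at 0000:04:00.0 would not match.
 */
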
static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                                                (mask_of_crashed_or_stuck_cores
                                                 >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

                vfs_referencing_pf  = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if (((pdev->device == OCTEON_CN66XX) ||
             (pdev->device == OCTEON_CN68XX)))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u8 bus, device, function;

                if (atomic_read(oct_dev->adapter_refcount) == 1) {
                        /* Each NIC gets one watchdog kernel thread.  The first
                         * PF (of each NIC) that gets pci_driver->probe()'d
                         * creates that thread.
                         */
                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

static bool fw_type_is_auto(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                fallthrough;
        case OCT_DEV_HOST_OK:

        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                fallthrough;
        case OCT_DEV_IO_QUEUES_DONE:
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Force all requests waiting to be fetched by OCTEON to
                 * complete.
                 */
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        struct octeon_instr_queue *iq;

                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        iq = oct->instr_queue[i];

                        if (atomic_read(&iq->instr_pending)) {
                                spin_lock_bh(&iq->lock);
                                iq->fill_cnt = 0;
                                iq->octeon_read_index = iq->host_write_index;
                                iq->stats.instr_processed +=
                                        atomic_read(&iq->instr_pending);
                                lio_process_iq_request_list(oct, iq, 0);
                                spin_unlock_bh(&iq->lock);
                        }
                }

                lio_process_ordered_list(oct, 1);
                octeon_free_sc_done_list(oct);
                octeon_free_sc_zombie_list(oct);

                fallthrough;
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

                fallthrough;
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

                fallthrough;
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

                fallthrough;
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                fallthrough;
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                fallthrough;
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                fallthrough;
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                fallthrough;
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                fallthrough;
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                fallthrough;
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                fallthrough;
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return 0;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate octeon_soft_command struct\n");
                return -ENOMEM;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
                octeon_free_soft_command(oct, sc);
        } else {
                /* Sleep on a wait queue until the condition flag indicates
                 * that the response arrived or timed out.
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return retval;

                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }

        return retval;
}

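/*
 * Typical usage (a sketch; the interface open/stop paths of this driver
 * call it this way):
 *
 *   send_rx_ctrl_cmd(lio, 1);   // tell firmware to start Rx traffic
 *   send_rx_ctrl_cmd(lio, 0);   // tell firmware to stop Rx traffic
 */
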
/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        struct lio *lio;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        tasklet_enable(&oct_priv->droq_tasklet);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_sync_octeon_time_wq(netdev);
        cleanup_link_status_change_wq(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        lio_vf_rep_destroy(oct);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        if (oct->devlink) {
                devlink_unregister(oct->devlink);
                devlink_free(oct->devlink);
                oct->devlink = NULL;
        }

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * liquidio_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (!oct_dev->octeon_id &&
            oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
                lio_vf_rep_modexit();

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
 * @oct: octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
        u32 dev_id, rev_id;
        int ret = 1;

        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
        oct->rev_id = rev_id & 0xff;

        switch (dev_id) {
        case OCTEON_CN68XX_PCIID:
                oct->chip_id = OCTEON_CN68XX;
                ret = lio_setup_cn68xx_octeon_device(oct);
                break;

        case OCTEON_CN66XX_PCIID:
                oct->chip_id = OCTEON_CN66XX;
                ret = lio_setup_cn66xx_octeon_device(oct);
                break;

        case OCTEON_CN23XX_PCIID_PF:
                oct->chip_id = OCTEON_CN23XX_PF_VID;
                ret = setup_cn23xx_octeon_pf_device(oct);
                if (ret)
                        break;
#ifdef CONFIG_PCI_IOV
                if (!ret)
                        pci_sriov_set_totalvfs(oct->pci_dev,
                                               oct->sriov_info.max_vfs);
#endif
                break;

        default:
                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
                        dev_id);
        }

        return ret;
}

/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
        /* setup PCI stuff first */
        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
        struct sk_buff *skb;
        struct octnet_buf_free_info *finfo;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        tx_buffer_free(skb);
}

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);
        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        tx_buffer_free(skb);
}

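/*
 * Note on the index math above (gather-list layout as used by this driver):
 * each octeon sg entry holds four pointers, so buffer i lives at
 * g->sg[i >> 2].ptr[i & 3]. For example, buffer 0 (the linear skb data) is
 * sg[0].ptr[0], and buffer 5 (hypothetically, the fifth page fragment)
 * would be sg[1].ptr[1].
 */
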
/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
        struct octeon_soft_command *sc;
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        sc = (struct octeon_soft_command *)buf;
        skb = (struct sk_buff *)sc->callback_arg;
        finfo = (struct octnet_buf_free_info *)&skb->cb;

        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        /* Don't free the skb yet */
}
1517
1518/**
1519 * liquidio_ptp_adjfreq - Adjust ptp frequency
1520 * @ptp: PTP clock info
1521 * @ppb: how much to adjust by, in parts-per-billion
1522 */
1523static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1524{
1525        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1526        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1527        u64 comp, delta;
1528        unsigned long flags;
1529        bool neg_adj = false;
1530
1531        if (ppb < 0) {
1532                neg_adj = true;
1533                ppb = -ppb;
1534        }
1535
1536        /* The hardware adds the clock compensation value to the
1537         * PTP clock on every coprocessor clock cycle, so we
1538         * compute the delta in terms of coprocessor clocks.
1539         */
1540        delta = (u64)ppb << 32;
1541        do_div(delta, oct->coproc_clock_rate);
1542
1543        spin_lock_irqsave(&lio->ptp_lock, flags);
1544        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1545        if (neg_adj)
1546                comp -= delta;
1547        else
1548                comp += delta;
1549        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1550        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1551
1552        return 0;
1553}
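
/* Worked example with illustrative numbers: the COMP register holds a
 * 32.32 fixed-point count of nanoseconds added per coprocessor cycle.
 * With a hypothetical coproc_clock_rate of 500 MHz, a +1000 ppb
 * (+1 ppm) adjustment gives
 *
 *	delta = (1000ULL << 32) / 500000000 = 8589
 *
 * which is ~2e-6 ns of extra compensation per 2 ns cycle, added on top
 * of the nominal value programmed by liquidio_ptp_init().
 */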
1554
1555/**
1556 * liquidio_ptp_adjtime - Adjust ptp time
1557 * @ptp: PTP clock info
1558 * @delta: how much to adjust by, in nanosecs
1559 */
1560static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1561{
1562        unsigned long flags;
1563        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1564
1565        spin_lock_irqsave(&lio->ptp_lock, flags);
1566        lio->ptp_adjust += delta;
1567        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1568
1569        return 0;
1570}
1571
1572/**
1573 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
1574 * @ptp: PTP clock info
1575 * @ts: timespec
1576 */
1577static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1578                                struct timespec64 *ts)
1579{
1580        u64 ns;
1581        unsigned long flags;
1582        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1583        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1584
1585        spin_lock_irqsave(&lio->ptp_lock, flags);
1586        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1587        ns += lio->ptp_adjust;
1588        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1589
1590        *ts = ns_to_timespec64(ns);
1591
1592        return 0;
1593}
1594
1595/**
1596 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
1597 * @ptp: PTP clock info
1598 * @ts: timespec
1599 */
1600static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1601                                const struct timespec64 *ts)
1602{
1603        u64 ns;
1604        unsigned long flags;
1605        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1606        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1607
1608        ns = timespec64_to_ns(ts);
1609
1610        spin_lock_irqsave(&lio->ptp_lock, flags);
1611        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1612        lio->ptp_adjust = 0;
1613        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1614
1615        return 0;
1616}
1617
1618/**
1619 * liquidio_ptp_enable - Enable/disable ancillary PTP clock features
1620 * @ptp: PTP clock info
1621 * @rq: the requested ancillary feature
1622 * @on: enable (nonzero) or disable; none are supported, so -EOPNOTSUPP
1623 */
1624static int
1625liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
1626                    struct ptp_clock_request __maybe_unused *rq,
1627                    int __maybe_unused on)
1628{
1629        return -EOPNOTSUPP;
1630}
1631
1632/**
1633 * oct_ptp_open - Open PTP clock source
1634 * @netdev: network device
1635 */
1636static void oct_ptp_open(struct net_device *netdev)
1637{
1638        struct lio *lio = GET_LIO(netdev);
1639        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1640
1641        spin_lock_init(&lio->ptp_lock);
1642
1643        snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1644        lio->ptp_info.owner = THIS_MODULE;
1645        lio->ptp_info.max_adj = 250000000;
1646        lio->ptp_info.n_alarm = 0;
1647        lio->ptp_info.n_ext_ts = 0;
1648        lio->ptp_info.n_per_out = 0;
1649        lio->ptp_info.pps = 0;
1650        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1651        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1652        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1653        lio->ptp_info.settime64 = liquidio_ptp_settime;
1654        lio->ptp_info.enable = liquidio_ptp_enable;
1655
1656        lio->ptp_adjust = 0;
1657
1658        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1659                                             &oct->pci_dev->dev);
1660
1661        if (IS_ERR(lio->ptp_clock))
1662                lio->ptp_clock = NULL;
1663}
1664
1665/**
1666 * liquidio_ptp_init - Init PTP clock
1667 * @oct: octeon device
1668 */
1669static void liquidio_ptp_init(struct octeon_device *oct)
1670{
1671        u64 clock_comp, cfg;
1672
1673        clock_comp = (u64)NSEC_PER_SEC << 32;
1674        do_div(clock_comp, oct->coproc_clock_rate);
1675        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1676
1677        /* Enable */
1678        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1679        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1680}
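
/* For illustration, with a hypothetical 500 MHz coprocessor clock the
 * nominal compensation programmed above works out to
 *
 *	clock_comp = (1000000000ULL << 32) / 500000000 = 2ULL << 32
 *
 * i.e. exactly 2.0 ns per cycle in 32.32 fixed point, after which
 * setting bit 0 of the CFG register starts the PTP clock.
 */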
1681
1682/**
1683 * load_firmware - Load firmware to device
1684 * @oct: octeon device
1685 *
1686 * Maps device to firmware filename, requests firmware, and downloads it
1687 */
1688static int load_firmware(struct octeon_device *oct)
1689{
1690        int ret = 0;
1691        const struct firmware *fw;
1692        char fw_name[LIO_MAX_FW_FILENAME_LEN];
1693        char *tmp_fw_type;
1694
1695        if (fw_type_is_auto()) {
1696                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1697                strscpy(fw_type, tmp_fw_type, sizeof(fw_type));
1698        } else {
1699                tmp_fw_type = fw_type;
1700        }
1701
1702        snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s", LIO_FW_DIR,
1703                 LIO_FW_BASE_NAME, octeon_get_conf(oct)->card_name,
1704                 tmp_fw_type, LIO_FW_NAME_SUFFIX);
1705
1706        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1707        if (ret) {
1708                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1709                        fw_name);
1710                release_firmware(fw);
1711                return ret;
1712        }
1713
1714        ret = octeon_download_firmware(oct, fw->data, fw->size);
1715
1716        release_firmware(fw);
1717
1718        return ret;
1719}
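
/* As a concrete example, assuming the stock macro values that ship
 * with this driver (LIO_FW_DIR "liquidio/", LIO_FW_BASE_NAME "lio_",
 * LIO_FW_NAME_SUFFIX ".bin"), a CN23xx card with fw_type left at
 * "auto" resolves to
 *
 *	request_firmware(&fw, "liquidio/lio_23xx_nic.bin", ...);
 *
 * which is looked up under /lib/firmware in the usual way.
 */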
1720
1721/**
1722 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
1723 * @work: work_struct data structure
1724 */
1725static void octnet_poll_check_txq_status(struct work_struct *work)
1726{
1727        struct cavium_wk *wk = (struct cavium_wk *)work;
1728        struct lio *lio = (struct lio *)wk->ctxptr;
1729
1730        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1731                return;
1732
1733        check_txq_status(lio);
1734        queue_delayed_work(lio->txq_status_wq.wq,
1735                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1736}
1737
1738/**
1739 * setup_tx_poll_fn - Sets up the txq poll check
1740 * @netdev: network device
1741 */
1742static inline int setup_tx_poll_fn(struct net_device *netdev)
1743{
1744        struct lio *lio = GET_LIO(netdev);
1745        struct octeon_device *oct = lio->oct_dev;
1746
1747        lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1748                                                WQ_MEM_RECLAIM, 0);
1749        if (!lio->txq_status_wq.wq) {
1750                dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1751                return -ENOMEM;
1752        }
1753        INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1754                          octnet_poll_check_txq_status);
1755        lio->txq_status_wq.wk.ctxptr = lio;
1756        queue_delayed_work(lio->txq_status_wq.wq,
1757                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1758        return 0;
1759}
1760
1761static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1762{
1763        struct lio *lio = GET_LIO(netdev);
1764
1765        if (lio->txq_status_wq.wq) {
1766                cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1767                destroy_workqueue(lio->txq_status_wq.wq);
1768        }
1769}
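
/* Note: octnet_poll_check_txq_status() re-arms itself every
 * millisecond, which is safe here because cancel_delayed_work_sync()
 * handles self-requeueing work; no stop flag is needed beyond the
 * LIO_IFSTATE_RUNNING check in the poll routine itself.
 */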
1770
1771/**
1772 * liquidio_open - Net device open for LiquidIO
1773 * @netdev: network device
1774 */
1775static int liquidio_open(struct net_device *netdev)
1776{
1777        struct lio *lio = GET_LIO(netdev);
1778        struct octeon_device *oct = lio->oct_dev;
1779        struct octeon_device_priv *oct_priv =
1780                (struct octeon_device_priv *)oct->priv;
1781        struct napi_struct *napi, *n;
1782        int ret = 0;
1783
1784        if (oct->props[lio->ifidx].napi_enabled == 0) {
1785                tasklet_disable(&oct_priv->droq_tasklet);
1786
1787                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1788                        napi_enable(napi);
1789
1790                oct->props[lio->ifidx].napi_enabled = 1;
1791
1792                if (OCTEON_CN23XX_PF(oct))
1793                        oct->droq[0]->ops.poll_mode = 1;
1794        }
1795
1796        if (oct->ptp_enable)
1797                oct_ptp_open(netdev);
1798
1799        ifstate_set(lio, LIO_IFSTATE_RUNNING);
1800
1801        /* Only a CN23XX PF running with MSI-X can do without the
1802         * periodic txq status poll.
1803         */
1804        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
1805                ret = setup_tx_poll_fn(netdev);
1806                if (ret)
1807                        return ret;
1808        }
1809
1810        netif_tx_start_all_queues(netdev);
1811
1812        /* Ready for link status updates */
1813        lio->intf_open = 1;
1814
1815        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1816
1817        /* tell Octeon to start forwarding packets to host */
1818        ret = send_rx_ctrl_cmd(lio, 1);
1819        if (ret)
1820                return ret;
1821
1822        /* start periodical statistics fetch */
1823        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1824        lio->stats_wk.ctxptr = lio;
1825        schedule_delayed_work(&lio->stats_wk.work,
1826                              msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1827
1828        dev_info(&oct->pci_dev->dev, "%s interface is open\n",
1829                 netdev->name);
1830
1831        return ret;
1832}
1833
1834/**
1835 * liquidio_stop - Net device stop for LiquidIO
1836 * @netdev: network device
1837 */
1838static int liquidio_stop(struct net_device *netdev)
1839{
1840        struct lio *lio = GET_LIO(netdev);
1841        struct octeon_device *oct = lio->oct_dev;
1842        struct octeon_device_priv *oct_priv =
1843                (struct octeon_device_priv *)oct->priv;
1844        struct napi_struct *napi, *n;
1845        int ret = 0;
1846
1847        ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1848
1849        /* Stop any link updates */
1850        lio->intf_open = 0;
1851
1852        stop_txqs(netdev);
1853
1854        /* Inform the network stack that the carrier is down */
1855        netif_carrier_off(netdev);
1856        netif_tx_disable(netdev);
1857
1858        lio->linfo.link.s.link_up = 0;
1859        lio->link_changes++;
1860
1861        /* Tell Octeon that nic interface is down. */
1862        ret = send_rx_ctrl_cmd(lio, 0);
1863        if (ret)
1864                return ret;
1865
1866        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
1867                cleanup_tx_poll_fn(netdev);
1872
1873        cancel_delayed_work_sync(&lio->stats_wk.work);
1874
1875        if (lio->ptp_clock) {
1876                ptp_clock_unregister(lio->ptp_clock);
1877                lio->ptp_clock = NULL;
1878        }
1879
1880        /* Wait for any pending Rx descriptors */
1881        if (lio_wait_for_clean_oq(oct))
1882                netif_info(lio, rx_err, lio->netdev,
1883                           "Proceeding with stop interface after partial RX desc processing\n");
1884
1885        if (oct->props[lio->ifidx].napi_enabled == 1) {
1886                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1887                        napi_disable(napi);
1888
1889                oct->props[lio->ifidx].napi_enabled = 0;
1890
1891                if (OCTEON_CN23XX_PF(oct))
1892                        oct->droq[0]->ops.poll_mode = 0;
1893
1894                tasklet_enable(&oct_priv->droq_tasklet);
1895        }
1896
1897        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1898
1899        return ret;
1900}
1901
1902/**
1903 * get_new_flags - Converts a mask based on net device flags
1904 * @netdev: network device
1905 *
1906 * This routine generates an octnet_ifflags mask from the net device flags
1907 * received from the OS.
1908 */
1909static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1910{
1911        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1912
1913        if (netdev->flags & IFF_PROMISC)
1914                f |= OCTNET_IFFLAG_PROMISC;
1915
1916        if (netdev->flags & IFF_ALLMULTI)
1917                f |= OCTNET_IFFLAG_ALLMULTI;
1918
1919        if (netdev->flags & IFF_MULTICAST) {
1920                f |= OCTNET_IFFLAG_MULTICAST;
1921
1922                /* Accept all multicast addresses if there are more than we
1923                 * can handle
1924                 */
1925                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1926                        f |= OCTNET_IFFLAG_ALLMULTI;
1927        }
1928
1929        if (netdev->flags & IFF_BROADCAST)
1930                f |= OCTNET_IFFLAG_BROADCAST;
1931
1932        return f;
1933}
1934
1935/**
1936 * liquidio_set_mcast_list - Net device set_multicast_list
1937 * @netdev: network device
1938 */
1939static void liquidio_set_mcast_list(struct net_device *netdev)
1940{
1941        struct lio *lio = GET_LIO(netdev);
1942        struct octeon_device *oct = lio->oct_dev;
1943        struct octnic_ctrl_pkt nctrl;
1944        struct netdev_hw_addr *ha;
1945        u64 *mc;
1946        int ret;
1947        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1948
1949        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1950
1951        /* Create a ctrl pkt command to be sent to the core app. */
1952        nctrl.ncmd.u64 = 0;
1953        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1954        nctrl.ncmd.s.param1 = get_new_flags(netdev);
1955        nctrl.ncmd.s.param2 = mc_count;
1956        nctrl.ncmd.s.more = mc_count;
1957        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1958        nctrl.netpndev = (u64)netdev;
1959        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1960
1961        /* copy all the addresses into the udd */
1962        mc = &nctrl.udd[0];
1963        netdev_for_each_mc_addr(ha, netdev) {
1964                *mc = 0;
1965                memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1966                /* no need to swap bytes */
1967
1968                if (++mc >= &nctrl.udd[mc_count])
1969                        break;
1970        }
1971
1972        /* ndo_set_rx_mode runs in atomic context (the address-list
1973         * lock is held), so we cannot sleep waiting for a response.
1974         */
1975
1976        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1977        if (ret) {
1978                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1979                        ret);
1980        }
1981}
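
/* udd[] layout, for illustration: each multicast address occupies one
 * u64, with the six MAC bytes stored at byte offsets 2..7 and the
 * leading two bytes left as zero padding - the same convention
 * liquidio_set_mac() below uses for nctrl.udd[0].
 */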
1982
1983/**
1984 * liquidio_set_mac - Net device set_mac_address
1985 * @netdev: network device
1986 * @p: pointer to sockaddr
1987 */
1988static int liquidio_set_mac(struct net_device *netdev, void *p)
1989{
1990        int ret = 0;
1991        struct lio *lio = GET_LIO(netdev);
1992        struct octeon_device *oct = lio->oct_dev;
1993        struct sockaddr *addr = (struct sockaddr *)p;
1994        struct octnic_ctrl_pkt nctrl;
1995
1996        if (!is_valid_ether_addr(addr->sa_data))
1997                return -EADDRNOTAVAIL;
1998
1999        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2000
2001        nctrl.ncmd.u64 = 0;
2002        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2003        nctrl.ncmd.s.param1 = 0;
2004        nctrl.ncmd.s.more = 1;
2005        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2006        nctrl.netpndev = (u64)netdev;
2007
2008        nctrl.udd[0] = 0;
2009        /* The MAC Address is presented in network byte order. */
2010        memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2011
2012        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2013        if (ret < 0) {
2014                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2015                return -ENOMEM;
2016        }
2017
2018        if (nctrl.sc_status) {
2019                dev_err(&oct->pci_dev->dev,
2020                        "%s: MAC Address change failed. sc return=%x\n",
2021                         __func__, nctrl.sc_status);
2022                return -EIO;
2023        }
2024
2025        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2026        memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2027
2028        return 0;
2029}
2030
2031static void
2032liquidio_get_stats64(struct net_device *netdev,
2033                     struct rtnl_link_stats64 *lstats)
2034{
2035        struct lio *lio = GET_LIO(netdev);
2036        struct octeon_device *oct;
2037        u64 pkts = 0, drop = 0, bytes = 0;
2038        struct oct_droq_stats *oq_stats;
2039        struct oct_iq_stats *iq_stats;
2040        int i, iq_no, oq_no;
2041
2042        oct = lio->oct_dev;
2043
2044        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2045                return;
2046
2047        for (i = 0; i < oct->num_iqs; i++) {
2048                iq_no = lio->linfo.txpciq[i].s.q_no;
2049                iq_stats = &oct->instr_queue[iq_no]->stats;
2050                pkts += iq_stats->tx_done;
2051                drop += iq_stats->tx_dropped;
2052                bytes += iq_stats->tx_tot_bytes;
2053        }
2054
2055        lstats->tx_packets = pkts;
2056        lstats->tx_bytes = bytes;
2057        lstats->tx_dropped = drop;
2058
2059        pkts = 0;
2060        drop = 0;
2061        bytes = 0;
2062
2063        for (i = 0; i < oct->num_oqs; i++) {
2064                oq_no = lio->linfo.rxpciq[i].s.q_no;
2065                oq_stats = &oct->droq[oq_no]->stats;
2066                pkts += oq_stats->rx_pkts_received;
2067                drop += (oq_stats->rx_dropped +
2068                         oq_stats->dropped_nodispatch +
2069                         oq_stats->dropped_toomany +
2070                         oq_stats->dropped_nomem);
2071                bytes += oq_stats->rx_bytes_received;
2072        }
2073
2074        lstats->rx_bytes = bytes;
2075        lstats->rx_packets = pkts;
2076        lstats->rx_dropped = drop;
2077
2078        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2079        lstats->collisions = oct->link_stats.fromhost.total_collisions;
2080
2081        /* detailed rx_errors: */
2082        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2083        /* received packets with CRC errors */
2084        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2085        /* received frame alignment errors */
2086        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2087        /* receiver FIFO overruns */
2088        lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2089
2090        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2091                lstats->rx_frame_errors + lstats->rx_fifo_errors;
2092
2093        /* detailed tx_errors */
2094        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2095        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2096        lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2097
2098        lstats->tx_errors = lstats->tx_aborted_errors +
2099                lstats->tx_carrier_errors +
2100                lstats->tx_fifo_errors;
2101}
2102
2103/**
2104 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
2105 * @netdev: network device
2106 * @ifr: interface request
2107 */
2108static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2109{
2110        struct hwtstamp_config conf;
2111        struct lio *lio = GET_LIO(netdev);
2112
2113        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2114                return -EFAULT;
2115
2116        if (conf.flags)
2117                return -EINVAL;
2118
2119        switch (conf.tx_type) {
2120        case HWTSTAMP_TX_ON:
2121        case HWTSTAMP_TX_OFF:
2122                break;
2123        default:
2124                return -ERANGE;
2125        }
2126
2127        switch (conf.rx_filter) {
2128        case HWTSTAMP_FILTER_NONE:
2129                break;
2130        case HWTSTAMP_FILTER_ALL:
2131        case HWTSTAMP_FILTER_SOME:
2132        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2133        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2134        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2135        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2136        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2137        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2138        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2139        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2140        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2141        case HWTSTAMP_FILTER_PTP_V2_EVENT:
2142        case HWTSTAMP_FILTER_PTP_V2_SYNC:
2143        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2144        case HWTSTAMP_FILTER_NTP_ALL:
2145                conf.rx_filter = HWTSTAMP_FILTER_ALL;
2146                break;
2147        default:
2148                return -ERANGE;
2149        }
2150
2151        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2152                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2154        else
2155                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2156
2157        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2158}
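
/* Illustrative userspace counterpart (standard SIOCSHWTSTAMP usage
 * from <linux/net_tstamp.h>, not part of this driver; "eth0" and fd
 * are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Filters the hardware cannot match exactly are widened to
 * HWTSTAMP_FILTER_ALL and reported back via the copy_to_user() above.
 */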
2159
2160/**
2161 * liquidio_ioctl - ioctl handler
2162 * @netdev: network device
2163 * @ifr: interface request
2164 * @cmd: command
2165 */
2166static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2167{
2168        struct lio *lio = GET_LIO(netdev);
2169
2170        switch (cmd) {
2171        case SIOCSHWTSTAMP:
2172                if (lio->oct_dev->ptp_enable)
2173                        return hwtstamp_ioctl(netdev, ifr);
2174                fallthrough;
2175        default:
2176                return -EOPNOTSUPP;
2177        }
2178}
2179
2180/**
2181 * handle_timestamp - handle a Tx timestamp response
2182 * @oct: octeon device
2183 * @status: response status
2184 * @buf: pointer to skb
2185 */
2186static void handle_timestamp(struct octeon_device *oct,
2187                             u32 status,
2188                             void *buf)
2189{
2190        struct octnet_buf_free_info *finfo;
2191        struct octeon_soft_command *sc;
2192        struct oct_timestamp_resp *resp;
2193        struct lio *lio;
2194        struct sk_buff *skb = (struct sk_buff *)buf;
2195
2196        finfo = (struct octnet_buf_free_info *)skb->cb;
2197        lio = finfo->lio;
2198        sc = finfo->sc;
2199        oct = lio->oct_dev;
2200        resp = (struct oct_timestamp_resp *)sc->virtrptr;
2201
2202        if (status != OCTEON_REQUEST_DONE) {
2203                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2204                        CVM_CAST64(status));
2205                resp->timestamp = 0;
2206        }
2207
2208        octeon_swap_8B_data(&resp->timestamp, 1);
2209
2210        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2211                struct skb_shared_hwtstamps ts;
2212                u64 ns = resp->timestamp;
2213
2214                netif_info(lio, tx_done, lio->netdev,
2215                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2216                           skb, (unsigned long long)ns);
2217                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2218                skb_tstamp_tx(skb, &ts);
2219        }
2220
2221        octeon_free_soft_command(oct, sc);
2222        tx_buffer_free(skb);
2223}
2224
2225/**
2226 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2227 * @oct: octeon device
2228 * @ndata: pointer to network data
2229 * @finfo: pointer to private network data
2230 * @xmit_more: more is coming
2231 */
2232static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2233                                         struct octnic_data_pkt *ndata,
2234                                         struct octnet_buf_free_info *finfo,
2235                                         int xmit_more)
2236{
2237        int retval;
2238        struct octeon_soft_command *sc;
2239        struct lio *lio;
2240        int ring_doorbell;
2241        u32 len;
2242
2243        lio = finfo->lio;
2244
2245        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2246                                            sizeof(struct oct_timestamp_resp));
2247        finfo->sc = sc;
2248
2249        if (!sc) {
2250                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2251                return IQ_SEND_FAILED;
2252        }
2253
2254        if (ndata->reqtype == REQTYPE_NORESP_NET)
2255                ndata->reqtype = REQTYPE_RESP_NET;
2256        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2257                ndata->reqtype = REQTYPE_RESP_NET_SG;
2258
2259        sc->callback = handle_timestamp;
2260        sc->callback_arg = finfo->skb;
2261        sc->iq_no = ndata->q_no;
2262
2263        if (OCTEON_CN23XX_PF(oct))
2264                len = (u32)((struct octeon_instr_ih3 *)
2265                            (&sc->cmd.cmd3.ih3))->dlengsz;
2266        else
2267                len = (u32)((struct octeon_instr_ih2 *)
2268                            (&sc->cmd.cmd2.ih2))->dlengsz;
2269
2270        ring_doorbell = !xmit_more;
2271
2272        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2273                                     sc, len, ndata->reqtype);
2274
2275        if (retval == IQ_SEND_FAILED) {
2276                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2277                        retval);
2278                octeon_free_soft_command(oct, sc);
2279        } else {
2280                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2281        }
2282
2283        return retval;
2284}
2285
2286/**
2287 * liquidio_xmit - Transmit network packets to the Octeon interface
2288 * @skb: skbuff struct handed down by the network layer
2289 * @netdev: pointer to network device
2290 *
2291 * Return: whether the packet was transmitted to the device okay or not
2292 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2293 */
2294static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2295{
2296        struct lio *lio;
2297        struct octnet_buf_free_info *finfo;
2298        union octnic_cmd_setup cmdsetup;
2299        struct octnic_data_pkt ndata;
2300        struct octeon_device *oct;
2301        struct oct_iq_stats *stats;
2302        struct octeon_instr_irh *irh;
2303        union tx_info *tx_info;
2304        int status = 0;
2305        int q_idx = 0, iq_no = 0;
2306        int j, xmit_more = 0;
2307        u64 dptr = 0;
2308        u32 tag = 0;
2309
2310        lio = GET_LIO(netdev);
2311        oct = lio->oct_dev;
2312
2313        q_idx = skb_iq(oct, skb);
2314        tag = q_idx;
2315        iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2316
2317        stats = &oct->instr_queue[iq_no]->stats;
2318
2319        /* Check for all conditions in which the current packet cannot be
2320         * transmitted.
2321         */
2322        if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2323            (!lio->linfo.link.s.link_up) ||
2324            (skb->len <= 0)) {
2325                netif_info(lio, tx_err, lio->netdev,
2326                           "Transmit failed link_status : %d\n",
2327                           lio->linfo.link.s.link_up);
2328                goto lio_xmit_failed;
2329        }
2330
2331        /* Use space in skb->cb to store info used to unmap and
2332         * free the buffers.
2333         */
2334        finfo = (struct octnet_buf_free_info *)skb->cb;
2335        finfo->lio = lio;
2336        finfo->skb = skb;
2337        finfo->sc = NULL;
2338
2339        /* Prepare the attributes for the data to be passed to OSI. */
2340        memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2341
2342        ndata.buf = (void *)finfo;
2343
2344        ndata.q_no = iq_no;
2345
2346        if (octnet_iq_is_full(oct, ndata.q_no)) {
2347                /* defer sending if queue is full */
2348                netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2349                           ndata.q_no);
2350                stats->tx_iq_busy++;
2351                return NETDEV_TX_BUSY;
2352        }
2353
2354        /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2355         *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2356         */
2357
2358        ndata.datasize = skb->len;
2359
2360        cmdsetup.u64 = 0;
2361        cmdsetup.s.iq_no = iq_no;
2362
2363        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2364                if (skb->encapsulation) {
2365                        cmdsetup.s.tnl_csum = 1;
2366                        stats->tx_vxlan++;
2367                } else {
2368                        cmdsetup.s.transport_csum = 1;
2369                }
2370        }
2371        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2372                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2373                cmdsetup.s.timestamp = 1;
2374        }
2375
2376        if (skb_shinfo(skb)->nr_frags == 0) {
2377                cmdsetup.s.u.datasize = skb->len;
2378                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2379
2380                /* Map the skb data for DMA to the device. */
2381                dptr = dma_map_single(&oct->pci_dev->dev,
2382                                      skb->data,
2383                                      skb->len,
2384                                      DMA_TO_DEVICE);
2385                if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2386                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2387                                __func__);
2388                        stats->tx_dmamap_fail++;
2389                        return NETDEV_TX_BUSY;
2390                }
2391
2392                if (OCTEON_CN23XX_PF(oct))
2393                        ndata.cmd.cmd3.dptr = dptr;
2394                else
2395                        ndata.cmd.cmd2.dptr = dptr;
2396                finfo->dptr = dptr;
2397                ndata.reqtype = REQTYPE_NORESP_NET;
2398
2399        } else {
2400                int i, frags;
2401                skb_frag_t *frag;
2402                struct octnic_gather *g;
2403
2404                spin_lock(&lio->glist_lock[q_idx]);
2405                g = (struct octnic_gather *)
2406                        lio_list_delete_head(&lio->glist[q_idx]);
2407                spin_unlock(&lio->glist_lock[q_idx]);
2408
2409                if (!g) {
2410                        netif_info(lio, tx_err, lio->netdev,
2411                                   "Transmit scatter gather: glist null!\n");
2412                        goto lio_xmit_failed;
2413                }
2414
2415                cmdsetup.s.gather = 1;
2416                cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2417                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2418
2419                memset(g->sg, 0, g->sg_size);
2420
2421                g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2422                                                 skb->data,
2423                                                 (skb->len - skb->data_len),
2424                                                 DMA_TO_DEVICE);
2425                if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2426                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2427                                __func__);
2428                        stats->tx_dmamap_fail++;
                            /* Return the gather entry to the free list so
                             * it is not leaked when the stack retries.
                             */
                            spin_lock(&lio->glist_lock[q_idx]);
                            list_add_tail(&g->list, &lio->glist[q_idx]);
                            spin_unlock(&lio->glist_lock[q_idx]);
2429                        return NETDEV_TX_BUSY;
2430                }
2431                add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2432
2433                frags = skb_shinfo(skb)->nr_frags;
2434                i = 1;
2435                while (frags--) {
2436                        frag = &skb_shinfo(skb)->frags[i - 1];
2437
2438                        g->sg[(i >> 2)].ptr[(i & 3)] =
2439                                skb_frag_dma_map(&oct->pci_dev->dev,
2440                                                 frag, 0, skb_frag_size(frag),
2441                                                 DMA_TO_DEVICE);
2442
2443                        if (dma_mapping_error(&oct->pci_dev->dev,
2444                                              g->sg[i >> 2].ptr[i & 3])) {
2445                                dma_unmap_single(&oct->pci_dev->dev,
2446                                                 g->sg[0].ptr[0],
2447                                                 skb->len - skb->data_len,
2448                                                 DMA_TO_DEVICE);
2449                                for (j = 1; j < i; j++) {
2450                                        frag = &skb_shinfo(skb)->frags[j - 1];
2451                                        dma_unmap_page(&oct->pci_dev->dev,
2452                                                       g->sg[j >> 2].ptr[j & 3],
2453                                                       skb_frag_size(frag),
2454                                                       DMA_TO_DEVICE);
2455                                }
2456                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2457                                        __func__);
                                    /* Return the gather entry to the free
                                     * list so it is not leaked when the
                                     * stack retries.
                                     */
                                    spin_lock(&lio->glist_lock[q_idx]);
                                    list_add_tail(&g->list, &lio->glist[q_idx]);
                                    spin_unlock(&lio->glist_lock[q_idx]);
2458                                return NETDEV_TX_BUSY;
2459                        }
2460
2461                        add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2462                                    (i & 3));
2463                        i++;
2464                }
2465
2466                dptr = g->sg_dma_ptr;
2467
2468                if (OCTEON_CN23XX_PF(oct))
2469                        ndata.cmd.cmd3.dptr = dptr;
2470                else
2471                        ndata.cmd.cmd2.dptr = dptr;
2472                finfo->dptr = dptr;
2473                finfo->g = g;
2474
2475                ndata.reqtype = REQTYPE_NORESP_NET_SG;
2476        }
2477
2478        if (OCTEON_CN23XX_PF(oct)) {
2479                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2480                tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2481        } else {
2482                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2483                tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2484        }
2485
2486        if (skb_shinfo(skb)->gso_size) {
2487                tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2488                tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2489                stats->tx_gso++;
2490        }
2491
2492        /* HW insert VLAN tag */
2493        if (skb_vlan_tag_present(skb)) {
2494                irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
2495                irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
2496        }
2497
2498        xmit_more = netdev_xmit_more();
2499
2500        if (unlikely(cmdsetup.s.timestamp))
2501                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2502        else
2503                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2504        if (status == IQ_SEND_FAILED)
2505                goto lio_xmit_failed;
2506
2507        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2508
2509        if (status == IQ_SEND_STOP)
2510                netif_stop_subqueue(netdev, q_idx);
2511
2512        netif_trans_update(netdev);
2513
2514        if (tx_info->s.gso_segs)
2515                stats->tx_done += tx_info->s.gso_segs;
2516        else
2517                stats->tx_done++;
2518        stats->tx_tot_bytes += ndata.datasize;
2519
2520        return NETDEV_TX_OK;
2521
2522lio_xmit_failed:
2523        stats->tx_dropped++;
2524        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2525                   iq_no, stats->tx_dropped);
2526        if (dptr)
2527                dma_unmap_single(&oct->pci_dev->dev, dptr,
2528                                 ndata.datasize, DMA_TO_DEVICE);
2529
2530        octeon_ring_doorbell_locked(oct, iq_no);
2531
2532        tx_buffer_free(skb);
2533        return NETDEV_TX_OK;
2534}
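
/* A note on the doorbell policy above: netdev_xmit_more() is the
 * stack's hint that more skbs will follow immediately on this queue,
 * so the doorbell (ring_doorbell = !xmit_more in the timestamp path,
 * the xmit_more argument otherwise) is rung only for the last packet
 * of a batch, saving an MMIO write per skb.  The failure path rings it
 * explicitly via octeon_ring_doorbell_locked() so already-queued
 * commands are not left stranded.
 */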
2535
2536/**
2537 * liquidio_tx_timeout - Network device Tx timeout
2538 * @netdev:    pointer to network device
2539 * @txqueue: index of the hung transmit queue
2540 */
2541static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2542{
2543        struct lio *lio;
2544
2545        lio = GET_LIO(netdev);
2546
2547        netif_info(lio, tx_err, lio->netdev,
2548                   "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
2549                   netdev->stats.tx_dropped);
2550        netif_trans_update(netdev);
2551        wake_txqs(netdev);
2552}
2553
2554static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2555                                    __be16 proto __attribute__((unused)),
2556                                    u16 vid)
2557{
2558        struct lio *lio = GET_LIO(netdev);
2559        struct octeon_device *oct = lio->oct_dev;
2560        struct octnic_ctrl_pkt nctrl;
2561        int ret = 0;
2562
2563        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2564
2565        nctrl.ncmd.u64 = 0;
2566        nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2567        nctrl.ncmd.s.param1 = vid;
2568        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2569        nctrl.netpndev = (u64)netdev;
2570        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2571
2572        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2573        if (ret) {
2574                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2575                        ret);
2576                if (ret > 0)
2577                        ret = -EIO;
2578        }
2579
2580        return ret;
2581}
2582
2583static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2584                                     __be16 proto __attribute__((unused)),
2585                                     u16 vid)
2586{
2587        struct lio *lio = GET_LIO(netdev);
2588        struct octeon_device *oct = lio->oct_dev;
2589        struct octnic_ctrl_pkt nctrl;
2590        int ret = 0;
2591
2592        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2593
2594        nctrl.ncmd.u64 = 0;
2595        nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2596        nctrl.ncmd.s.param1 = vid;
2597        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2598        nctrl.netpndev = (u64)netdev;
2599        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2600
2601        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2602        if (ret) {
2603                dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2604                        ret);
2605                if (ret > 0)
2606                        ret = -EIO;
2607        }
2608        return ret;
2609}
2610
2611/**
2612 * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
2613 * @netdev:                pointer to network device
2614 * @command:               OCTNET_CMD_TNL_RX_CSUM_CTL
2615 * @rx_cmd:                OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2616 * Return:                 0 on success, negative errno on failure
2617 */
2618static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2619                                       u8 rx_cmd)
2620{
2621        struct lio *lio = GET_LIO(netdev);
2622        struct octeon_device *oct = lio->oct_dev;
2623        struct octnic_ctrl_pkt nctrl;
2624        int ret = 0;
2625
2626        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2627
2628        nctrl.ncmd.u64 = 0;
2629        nctrl.ncmd.s.cmd = command;
2630        nctrl.ncmd.s.param1 = rx_cmd;
2631        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2632        nctrl.netpndev = (u64)netdev;
2633        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2634
2635        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2636        if (ret) {
2637                dev_err(&oct->pci_dev->dev,
2638                        "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2639                        ret);
2640                if (ret > 0)
2641                        ret = -EIO;
2642        }
2643        return ret;
2644}
2645
2646/**
2647 * liquidio_vxlan_port_command - Send command to add/delete VxLAN UDP port to firmware
2648 * @netdev:                pointer to network device
2649 * @command:               OCTNET_CMD_VXLAN_PORT_CONFIG
2650 * @vxlan_port:            VxLAN port to be added or deleted
2651 * @vxlan_cmd_bit:         OCTNET_CMD_VXLAN_PORT_ADD,
2652 *                              OCTNET_CMD_VXLAN_PORT_DEL
2653 * Return:                 0 on success, negative errno on failure
2654 */
2655static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2656                                       u16 vxlan_port, u8 vxlan_cmd_bit)
2657{
2658        struct lio *lio = GET_LIO(netdev);
2659        struct octeon_device *oct = lio->oct_dev;
2660        struct octnic_ctrl_pkt nctrl;
2661        int ret = 0;
2662
2663        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2664
2665        nctrl.ncmd.u64 = 0;
2666        nctrl.ncmd.s.cmd = command;
2667        nctrl.ncmd.s.more = vxlan_cmd_bit;
2668        nctrl.ncmd.s.param1 = vxlan_port;
2669        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2670        nctrl.netpndev = (u64)netdev;
2671        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2672
2673        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2674        if (ret) {
2675                dev_err(&oct->pci_dev->dev,
2676                        "VxLAN port add/delete failed in core (ret:0x%x)\n",
2677                        ret);
2678                if (ret > 0)
2679                        ret = -EIO;
2680        }
2681        return ret;
2682}
2683
2684static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2685                                        unsigned int table, unsigned int entry,
2686                                        struct udp_tunnel_info *ti)
2687{
2688        return liquidio_vxlan_port_command(netdev,
2689                                           OCTNET_CMD_VXLAN_PORT_CONFIG,
2690                                           htons(ti->port),
2691                                           OCTNET_CMD_VXLAN_PORT_ADD);
2692}
2693
2694static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2695                                          unsigned int table,
2696                                          unsigned int entry,
2697                                          struct udp_tunnel_info *ti)
2698{
2699        return liquidio_vxlan_port_command(netdev,
2700                                           OCTNET_CMD_VXLAN_PORT_CONFIG,
2701                                           htons(ti->port),
2702                                           OCTNET_CMD_VXLAN_PORT_DEL);
2703}
2704
2705static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2706        .set_port       = liquidio_udp_tunnel_set_port,
2707        .unset_port     = liquidio_udp_tunnel_unset_port,
2708        .tables         = {
2709                { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2710        },
2711};
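
/* The udp_tunnel_nic core drives the two callbacks above: once a
 * netdev is registered with netdev->udp_tunnel_nic_info pointing at
 * liquidio_udp_tunnels (done during interface setup elsewhere in this
 * driver), the stack invokes set_port()/unset_port() as VXLAN sockets
 * are opened and closed, replacing the older ndo_udp_tunnel_add/del
 * callbacks.
 */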
2712
2713/**
2714 * liquidio_fix_features - Net device fix features
2715 * @netdev:  pointer to network device
2716 * @request: features requested
2717 * Return: updated features list
2718 */
2719static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2720                                               netdev_features_t request)
2721{
2722        struct lio *lio = netdev_priv(netdev);
2723
2724        if ((request & NETIF_F_RXCSUM) &&
2725            !(lio->dev_capability & NETIF_F_RXCSUM))
2726                request &= ~NETIF_F_RXCSUM;
2727
2728        if ((request & NETIF_F_HW_CSUM) &&
2729            !(lio->dev_capability & NETIF_F_HW_CSUM))
2730                request &= ~NETIF_F_HW_CSUM;
2731
2732        if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2733                request &= ~NETIF_F_TSO;
2734
2735        if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2736                request &= ~NETIF_F_TSO6;
2737
2738        if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2739                request &= ~NETIF_F_LRO;
2740
2741        /* Disable LRO if RXCSUM is off */
2742        if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2743            (lio->dev_capability & NETIF_F_LRO))
2744                request &= ~NETIF_F_LRO;
2745
2746        if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2747            !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2748                request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2749
2750        return request;
2751}
2752
2753/**
2754 * liquidio_set_features - Net device set features
2755 * @netdev:  pointer to network device
2756 * @features: features to enable/disable
2757 */
2758static int liquidio_set_features(struct net_device *netdev,
2759                                 netdev_features_t features)
2760{
2761        struct lio *lio = netdev_priv(netdev);
2762
2763        if ((features & NETIF_F_LRO) &&
2764            (lio->dev_capability & NETIF_F_LRO) &&
2765            !(netdev->features & NETIF_F_LRO))
2766                liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2767                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2768        else if (!(features & NETIF_F_LRO) &&
2769                 (lio->dev_capability & NETIF_F_LRO) &&
2770                 (netdev->features & NETIF_F_LRO))
2771                liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2772                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2773
2774        /* Send a command to the firmware to enable/disable RX checksum
2775         * offload when the setting is changed (e.g. via ethtool -K).
2776         */
2777        if (!(netdev->features & NETIF_F_RXCSUM) &&
2778            (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2779            (features & NETIF_F_RXCSUM))
2780                liquidio_set_rxcsum_command(netdev,
2781                                            OCTNET_CMD_TNL_RX_CSUM_CTL,
2782                                            OCTNET_CMD_RXCSUM_ENABLE);
2783        else if ((netdev->features & NETIF_F_RXCSUM) &&
2784                 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2785                 !(features & NETIF_F_RXCSUM))
2786                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2787                                            OCTNET_CMD_RXCSUM_DISABLE);
2788
2789        if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2790            (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2791            !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2792                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2793                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
2794        else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2795                 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2796                 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2797                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2798                                     OCTNET_CMD_VLAN_FILTER_DISABLE);
2799
2800        return 0;
2801}
2802
2803static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2804                                 u8 *mac, bool is_admin_assigned)
2805{
2806        struct lio *lio = GET_LIO(netdev);
2807        struct octeon_device *oct = lio->oct_dev;
2808        struct octnic_ctrl_pkt nctrl;
2809        int ret = 0;
2810
2811        if (!is_valid_ether_addr(mac))
2812                return -EINVAL;
2813
2814        if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2815                return -EINVAL;
2816
2817        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2818
2819        nctrl.ncmd.u64 = 0;
2820        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2821        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2822        nctrl.ncmd.s.param1 = vfidx + 1;
2823        nctrl.ncmd.s.more = 1;
2824        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2825        nctrl.netpndev = (u64)netdev;
2826        if (is_admin_assigned) {
2827                nctrl.ncmd.s.param2 = true;
2828                nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2829        }
2830
2831        nctrl.udd[0] = 0;
2832        /* The MAC Address is presented in network byte order. */
2833        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2834
2835        oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2836
2837        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2838        if (ret > 0)
2839                ret = -EIO;
2840
2841        return ret;
2842}
2843
2844static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2845{
2846        struct lio *lio = GET_LIO(netdev);
2847        struct octeon_device *oct = lio->oct_dev;
2848        int retval;
2849
2850        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2851                return -EINVAL;
2852
2853        retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2854        if (!retval)
2855                cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2856
2857        return retval;
2858}
2859
2860static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2861                                    bool enable)
2862{
2863        struct lio *lio = GET_LIO(netdev);
2864        struct octeon_device *oct = lio->oct_dev;
2865        struct octnic_ctrl_pkt nctrl;
2866        int retval;
2867
2868        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2869                netif_info(lio, drv, lio->netdev,
2870                           "firmware does not support spoofchk\n");
2871                return -EOPNOTSUPP;
2872        }
2873
2874        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2875                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2876                return -EINVAL;
2877        }
2878
2879        if (enable) {
2880                if (oct->sriov_info.vf_spoofchk[vfidx])
2881                        return 0;
2882        } else {
2883                /* Clear */
2884                if (!oct->sriov_info.vf_spoofchk[vfidx])
2885                        return 0;
2886        }
2887
2888        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2889        nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2890        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2891        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2892        nctrl.ncmd.s.param1 = vfidx + 1;
2895        nctrl.ncmd.s.param2 = enable;
2896        nctrl.ncmd.s.more = 0;
2897        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2898        nctrl.cb_fn = NULL;
2899
2900        retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2901
2902        if (retval) {
2903                netif_info(lio, drv, lio->netdev,
2904                           "Failed to set VF %d spoofchk %s\n", vfidx,
2905                           enable ? "on" : "off");
2906                return -EIO;
2907        }
2908
2909        oct->sriov_info.vf_spoofchk[vfidx] = enable;
2910        netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2911                   enable ? "on" : "off");
2912
2913        return 0;
2914}
2915
2916static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2917                                u16 vlan, u8 qos, __be16 vlan_proto)
2918{
2919        struct lio *lio = GET_LIO(netdev);
2920        struct octeon_device *oct = lio->oct_dev;
2921        struct octnic_ctrl_pkt nctrl;
2922        u16 vlantci;
2923        int ret = 0;
2924
2925        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2926                return -EINVAL;
2927
2928        if (vlan_proto != htons(ETH_P_8021Q))
2929                return -EPROTONOSUPPORT;
2930
2931        if (vlan >= VLAN_N_VID || qos > 7)
2932                return -EINVAL;
2933
2934        if (vlan)
2935                vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2936        else
2937                vlantci = 0;
2938
2939        if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2940                return 0;
2941
2942        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2943
2944        if (vlan)
2945                nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2946        else
2947                nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2948
2949        nctrl.ncmd.s.param1 = vlantci;
2950        /* vfidx is 0 based, but vf_num (param2) is 1 based */
2951        nctrl.ncmd.s.param2 = vfidx + 1;
2952        nctrl.ncmd.s.more = 0;
2953        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2954        nctrl.cb_fn = NULL;
2955
2956        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2957        if (ret) {
2958                if (ret > 0)
2959                        ret = -EIO;
2960                return ret;
2961        }
2962
2963        oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2964
2965        return ret;
2966}
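
/* Worked example of the vlantci encoding above: vlan 5 with qos 3
 * gives 5 | (3 << VLAN_PRIO_SHIFT) = 5 | (3 << 13) = 0x6005 - the
 * standard VLAN TCI layout with the PCP in the top three bits and the
 * VID in the low twelve.
 */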
2967
2968static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2969                                  struct ifla_vf_info *ivi)
2970{
2971        struct lio *lio = GET_LIO(netdev);
2972        struct octeon_device *oct = lio->oct_dev;
2973        u8 *macaddr;
2974
2975        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2976                return -EINVAL;
2977
2978        memset(ivi, 0, sizeof(struct ifla_vf_info));
2979
2980        ivi->vf = vfidx;
2981        macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2982        ether_addr_copy(&ivi->mac[0], macaddr);
2983        ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2984        ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2985        if (oct->sriov_info.trusted_vf.active &&
2986            oct->sriov_info.trusted_vf.id == vfidx)
2987                ivi->trusted = true;
2988        else
2989                ivi->trusted = false;
2990        ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2991        ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2992        ivi->max_tx_rate = lio->linfo.link.s.speed;
2993        ivi->min_tx_rate = 0;
2994
2995        return 0;
2996}
2997
2998static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2999{
3000        struct octeon_device *oct = lio->oct_dev;
3001        struct octeon_soft_command *sc;
3002        int retval;
3003
3004        sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3005        if (!sc)
3006                return -ENOMEM;
3007
3008        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3009
3010        /* vfidx is 0 based, but vf_num (param1) is 1 based */
3011        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3012                                    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3013                                    trusted);
3014
3015        init_completion(&sc->complete);
3016        sc->sc_status = OCTEON_REQUEST_PENDING;
3017
3018        retval = octeon_send_soft_command(oct, sc);
3019        if (retval == IQ_SEND_FAILED) {
3020                octeon_free_soft_command(oct, sc);
3021                retval = -EIO;
3022        } else {
3023                /* Wait for response or timeout */
3024                retval = wait_for_sc_completion_timeout(oct, sc, 0);
3025                if (retval)
3026                        return retval;
3027
3028                WRITE_ONCE(sc->caller_is_done, true);
3029        }
3030
3031        return retval;
3032}
3033
3034static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3035                                 bool setting)
3036{
3037        struct lio *lio = GET_LIO(netdev);
3038        struct octeon_device *oct = lio->oct_dev;
3039
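            /* Note: strcmp() is a lexicographic comparison; it orders
             * versions correctly only while every component is a single
             * digit (e.g. "1.10.0" would compare as older than "1.7.1").
             */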
3040        if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3041                /* trusted vf is not supported by firmware older than 1.7.1 */
3042                return -EOPNOTSUPP;
3043        }
3044
3045        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3046                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3047                return -EINVAL;
3048        }
3049
3050        if (setting) {
3051                /* Set */
3052
3053                if (oct->sriov_info.trusted_vf.active &&
3054                    oct->sriov_info.trusted_vf.id == vfidx)
3055                        return 0;
3056
3057                if (oct->sriov_info.trusted_vf.active) {
3058                        netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3059                        return -EPERM;
3060                }
3061        } else {
3062                /* Clear */
3063
3064                if (!oct->sriov_info.trusted_vf.active)
3065                        return 0;
3066        }
3067
3068        if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3069                if (setting) {
3070                        oct->sriov_info.trusted_vf.id = vfidx;
3071                        oct->sriov_info.trusted_vf.active = true;
3072                } else {
3073                        oct->sriov_info.trusted_vf.active = false;
3074                }
3075
3076                netif_info(lio, drv, lio->netdev, "VF %d is %strusted\n", vfidx,
3077                           setting ? "" : "not ");
3078        } else {
3079                netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3080                return -EIO;
3081        }
3082
3083        return 0;
3084}
3085
3086static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3087                                      int linkstate)
3088{
3089        struct lio *lio = GET_LIO(netdev);
3090        struct octeon_device *oct = lio->oct_dev;
3091        struct octnic_ctrl_pkt nctrl;
3092        int ret = 0;
3093
3094        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3095                return -EINVAL;
3096
3097        if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3098                return 0;
3099
3100        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3101        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3102        nctrl.ncmd.s.param1 =
3103            vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3104        nctrl.ncmd.s.param2 = linkstate;
3105        nctrl.ncmd.s.more = 0;
3106        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3107        nctrl.cb_fn = NULL;
3108
3109        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3110
3111        if (!ret)
3112                oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3113        else if (ret > 0)
3114                ret = -EIO;
3115
3116        return ret;
3117}
3118
3119static int
3120liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3121{
3122        struct lio_devlink_priv *priv;
3123        struct octeon_device *oct;
3124
3125        priv = devlink_priv(devlink);
3126        oct = priv->oct;
3127
3128        *mode = oct->eswitch_mode;
3129
3130        return 0;
3131}
3132
3133static int
3134liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3135                          struct netlink_ext_ack *extack)
3136{
3137        struct lio_devlink_priv *priv;
3138        struct octeon_device *oct;
3139        int ret = 0;
3140
3141        priv = devlink_priv(devlink);
3142        oct = priv->oct;
3143
3144        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3145                return -EINVAL;
3146
3147        if (oct->eswitch_mode == mode)
3148                return 0;
3149
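            /* Note the ordering below: switchdev mode is entered before the
             * VF representors are created, and the representors are
             * destroyed before falling back to legacy mode.
             */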
3150        switch (mode) {
3151        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3152                oct->eswitch_mode = mode;
3153                ret = lio_vf_rep_create(oct);
3154                break;
3155
3156        case DEVLINK_ESWITCH_MODE_LEGACY:
3157                lio_vf_rep_destroy(oct);
3158                oct->eswitch_mode = mode;
3159                break;
3160
3161        default:
3162                ret = -EINVAL;
3163        }
3164
3165        return ret;
3166}
3167
3168static const struct devlink_ops liquidio_devlink_ops = {
3169        .eswitch_mode_get = liquidio_eswitch_mode_get,
3170        .eswitch_mode_set = liquidio_eswitch_mode_set,
3171};
3172
3173static int
3174liquidio_get_port_parent_id(struct net_device *dev,
3175                            struct netdev_phys_item_id *ppid)
3176{
3177        struct lio *lio = GET_LIO(dev);
3178        struct octeon_device *oct = lio->oct_dev;
3179
3180        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3181                return -EOPNOTSUPP;
3182
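            /* Report the port MAC as the switchdev parent ID; hw_addr keeps
             * the MAC in the last six bytes of the u64, hence the 2-byte
             * offset.
             */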
3183        ppid->id_len = ETH_ALEN;
3184        ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3185
3186        return 0;
3187}
3188
3189static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3190                                 struct ifla_vf_stats *vf_stats)
3191{
3192        struct lio *lio = GET_LIO(netdev);
3193        struct octeon_device *oct = lio->oct_dev;
3194        struct oct_vf_stats stats;
3195        int ret;
3196
3197        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3198                return -EINVAL;
3199
3200        memset(&stats, 0, sizeof(struct oct_vf_stats));
3201        ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3202        if (!ret) {
3203                vf_stats->rx_packets = stats.rx_packets;
3204                vf_stats->tx_packets = stats.tx_packets;
3205                vf_stats->rx_bytes = stats.rx_bytes;
3206                vf_stats->tx_bytes = stats.tx_bytes;
3207                vf_stats->broadcast = stats.broadcast;
3208                vf_stats->multicast = stats.multicast;
3209        }
3210
3211        return ret;
3212}
3213
3214static const struct net_device_ops lionetdevops = {
3215        .ndo_open               = liquidio_open,
3216        .ndo_stop               = liquidio_stop,
3217        .ndo_start_xmit         = liquidio_xmit,
3218        .ndo_get_stats64        = liquidio_get_stats64,
3219        .ndo_set_mac_address    = liquidio_set_mac,
3220        .ndo_set_rx_mode        = liquidio_set_mcast_list,
3221        .ndo_tx_timeout         = liquidio_tx_timeout,
3222
3223        .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3224        .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3225        .ndo_change_mtu         = liquidio_change_mtu,
3226        .ndo_eth_ioctl          = liquidio_ioctl,
3227        .ndo_fix_features       = liquidio_fix_features,
3228        .ndo_set_features       = liquidio_set_features,
3229        .ndo_set_vf_mac         = liquidio_set_vf_mac,
3230        .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3231        .ndo_get_vf_config      = liquidio_get_vf_config,
3232        .ndo_set_vf_spoofchk    = liquidio_set_vf_spoofchk,
3233        .ndo_set_vf_trust       = liquidio_set_vf_trust,
3234        .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3235        .ndo_get_vf_stats       = liquidio_get_vf_stats,
3236        .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3237};
3238
3239/**
3240 * liquidio_init - Entry point for the liquidio module
3241 */
3242static int __init liquidio_init(void)
3243{
3244        int i;
3245        struct handshake *hs;
3246
3247        init_completion(&first_stage);
3248
3249        octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3250
3251        if (liquidio_init_pci())
3252                return -EINVAL;
3253
3254        wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3255
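            /* Two-phase handshake for every probed device: first wait
             * (indefinitely) for basic init from the probe path, then wait
             * up to 30 seconds for the firmware application to start.
             */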
3256        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3257                hs = &handshake[i];
3258                if (hs->pci_dev) {
3259                        wait_for_completion(&hs->init);
3260                        if (!hs->init_ok) {
3261                                /* init handshake failed */
3262                                dev_err(&hs->pci_dev->dev,
3263                                        "Failed to init device\n");
3264                                liquidio_deinit_pci();
3265                                return -EIO;
3266                        }
3267                }
3268        }
3269
3270        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3271                hs = &handshake[i];
3272                if (hs->pci_dev) {
3273                        wait_for_completion_timeout(&hs->started,
3274                                                    msecs_to_jiffies(30000));
3275                        if (!hs->started_ok) {
3276                                /* starter handshake failed */
3277                                dev_err(&hs->pci_dev->dev,
3278                                        "Firmware failed to start\n");
3279                                liquidio_deinit_pci();
3280                                return -EIO;
3281                        }
3282                }
3283        }
3284
3285        return 0;
3286}
3287
3288static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3289{
3290        struct octeon_device *oct = (struct octeon_device *)buf;
3291        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3292        int gmxport = 0;
3293        union oct_link_status *ls;
3294        int i;
3295
3296        if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3297                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3298                        recv_pkt->buffer_size[0],
3299                        recv_pkt->rh.r_nic_info.gmxport);
3300                goto nic_info_err;
3301        }
3302
3303        gmxport = recv_pkt->rh.r_nic_info.gmxport;
3304        ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3305                OCT_DROQ_INFO_SIZE);
3306
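            /* Link status data arrives from firmware as big-endian 64-bit
             * words; swap them on little-endian hosts before use.
             */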
3307        octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3308        for (i = 0; i < oct->ifcount; i++) {
3309                if (oct->props[i].gmxport == gmxport) {
3310                        update_link_status(oct->props[i].netdev, ls);
3311                        break;
3312                }
3313        }
3314
3315nic_info_err:
3316        for (i = 0; i < recv_pkt->buffer_count; i++)
3317                recv_buffer_free(recv_pkt->buffer_ptr[i]);
3318        octeon_free_recv_info(recv_info);
3319        return 0;
3320}
3321
3322/**
3323 * setup_nic_devices - Setup network interfaces
3324 * @octeon_dev:  octeon device
3325 *
3326 * Called during init time for each device. It assumes the NIC
3327 * is already up and running. The link information for each
3328 * interface is passed in link_info.
3329 */
3330static int setup_nic_devices(struct octeon_device *octeon_dev)
3331{
3332        struct lio *lio = NULL;
3333        struct net_device *netdev;
3334        u8 mac[ETH_ALEN], i, j, *fw_ver, *micro_ver;
3335        unsigned long micro;
3336        u32 cur_ver;
3337        struct octeon_soft_command *sc;
3338        struct liquidio_if_cfg_resp *resp;
3339        struct octdev_props *props;
3340        int retval, num_iqueues, num_oqueues;
3341        int max_num_queues = 0;
3342        union oct_nic_if_cfg if_cfg;
3343        unsigned int base_queue;
3344        unsigned int gmx_port_id;
3345        u32 resp_size, data_size;
3346        u32 ifidx_or_pfnum;
3347        struct lio_version *vdata;
3348        struct devlink *devlink;
3349        struct lio_devlink_priv *lio_devlink;
3350
3351        /* This is to handle link status changes */
3352        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3353                                    OPCODE_NIC_INFO,
3354                                    lio_nic_info, octeon_dev);
3355
3356        /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3357         * They are handled directly.
3358         */
3359        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3360                                        free_netbuf);
3361
3362        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3363                                        free_netsgbuf);
3364
3365        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3366                                        free_netsgbuf_with_resp);
3367
3368        for (i = 0; i < octeon_dev->ifcount; i++) {
3369                resp_size = sizeof(struct liquidio_if_cfg_resp);
3370                data_size = sizeof(struct lio_version);
3371                sc = (struct octeon_soft_command *)
3372                        octeon_alloc_soft_command(octeon_dev, data_size,
3373                                                  resp_size, 0);
                    if (!sc) {
                            dev_err(&octeon_dev->pci_dev->dev,
                                    "soft command allocation failed\n");
                            goto setup_nic_dev_done;
                    }
3374                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3375                vdata = (struct lio_version *)sc->virtdptr;
3376
3377                *((u64 *)vdata) = 0;
3378                vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3379                vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3380                vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3381
3382                if (OCTEON_CN23XX_PF(octeon_dev)) {
3383                        num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3384                        num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3385                        base_queue = octeon_dev->sriov_info.pf_srn;
3386
3387                        gmx_port_id = octeon_dev->pf_num;
3388                        ifidx_or_pfnum = octeon_dev->pf_num;
3389                } else {
3390                        num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3391                                                octeon_get_conf(octeon_dev), i);
3392                        num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3393                                                octeon_get_conf(octeon_dev), i);
3394                        base_queue = CFG_GET_BASE_QUE_NIC_IF(
3395                                                octeon_get_conf(octeon_dev), i);
3396                        gmx_port_id = CFG_GET_GMXID_NIC_IF(
3397                                                octeon_get_conf(octeon_dev), i);
3398                        ifidx_or_pfnum = i;
3399                }
3400
3401                dev_dbg(&octeon_dev->pci_dev->dev,
3402                        "requesting config for interface %d, iqs %d, oqs %d\n",
3403                        ifidx_or_pfnum, num_iqueues, num_oqueues);
3404
3405                if_cfg.u64 = 0;
3406                if_cfg.s.num_iqueues = num_iqueues;
3407                if_cfg.s.num_oqueues = num_oqueues;
3408                if_cfg.s.base_queue = base_queue;
3409                if_cfg.s.gmx_port_id = gmx_port_id;
3410
3411                sc->iq_no = 0;
3412
3413                octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3414                                            OPCODE_NIC_IF_CFG, 0,
3415                                            if_cfg.u64, 0);
3416
3417                init_completion(&sc->complete);
3418                sc->sc_status = OCTEON_REQUEST_PENDING;
3419
3420                retval = octeon_send_soft_command(octeon_dev, sc);
3421                if (retval == IQ_SEND_FAILED) {
3422                        dev_err(&octeon_dev->pci_dev->dev,
3423                                "iq/oq config failed status: %x\n",
3424                                retval);
3425                        /* Soft instr is freed by driver in case of failure. */
3426                        octeon_free_soft_command(octeon_dev, sc);
3427                        return -EIO;
3428                }
3429
3430                /* Wait until the response arrives or the request times out. */
3433                retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3434                if (retval)
3435                        return retval;
3436
3437                retval = resp->status;
3438                if (retval) {
3439                        dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3440                        WRITE_ONCE(sc->caller_is_done, true);
3441                        goto setup_nic_dev_done;
3442                }
3443                snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3444                         sizeof(octeon_dev->fw_info.liquidio_firmware_version),
3445                         "%s", resp->cfg_info.liquidio_firmware_version);
3446
3447                /* Verify f/w version (in case of 'auto' loading from flash) */
3448                fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3449                if (memcmp(LIQUIDIO_BASE_VERSION,
3450                           fw_ver,
3451                           strlen(LIQUIDIO_BASE_VERSION))) {
3452                        dev_err(&octeon_dev->pci_dev->dev,
3453                                "Unmatched firmware version. Expected %s.x, got %s.\n",
3454                                LIQUIDIO_BASE_VERSION, fw_ver);
3455                        WRITE_ONCE(sc->caller_is_done, true);
3456                        goto setup_nic_dev_done;
3457                } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3458                           FW_IS_PRELOADED) {
3459                        dev_info(&octeon_dev->pci_dev->dev,
3460                                 "Using auto-loaded firmware version %s.\n",
3461                                 fw_ver);
3462                }
3463
3464                /* extract micro version field; point past '<maj>.<min>.' */
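                    /* e.g. with LIQUIDIO_BASE_VERSION "1.7", a firmware
                     * version string of "1.7.2" leaves micro_ver at "2".
                     */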
3465                micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3466                if (kstrtoul(micro_ver, 10, &micro) != 0)
3467                        micro = 0;
3468                octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3469                octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3470                octeon_dev->fw_info.ver.rev = micro;
3471
3472                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3473                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3474
3475                num_iqueues = hweight64(resp->cfg_info.iqmask);
3476                num_oqueues = hweight64(resp->cfg_info.oqmask);
3477
3478                if (!(num_iqueues) || !(num_oqueues)) {
3479                        dev_err(&octeon_dev->pci_dev->dev,
3480                                "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3481                                resp->cfg_info.iqmask,
3482                                resp->cfg_info.oqmask);
3483                        WRITE_ONCE(sc->caller_is_done, true);
3484                        goto setup_nic_dev_done;
3485                }
3486
3487                if (OCTEON_CN6XXX(octeon_dev)) {
3488                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3489                                                                    cn6xxx));
3490                } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3491                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3492                                                                    cn23xx_pf));
3493                }
3494
3495                dev_dbg(&octeon_dev->pci_dev->dev,
3496                "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d, max_num_queues %d\n",
3497                        i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3498                        num_iqueues, num_oqueues, max_num_queues);
3499                netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3500
3501                if (!netdev) {
3502                        dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3503                        WRITE_ONCE(sc->caller_is_done, true);
3504                        goto setup_nic_dev_done;
3505                }
3506
3507                SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3508
3509                /* Associate the routines that will handle different
3510                 * netdev tasks.
3511                 */
3512                netdev->netdev_ops = &lionetdevops;
3513
3514                retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3515                if (retval) {
3516                        dev_err(&octeon_dev->pci_dev->dev,
3517                                "setting real number rx failed\n");
3518                        WRITE_ONCE(sc->caller_is_done, true);
3519                        goto setup_nic_dev_free;
3520                }
3521
3522                retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3523                if (retval) {
3524                        dev_err(&octeon_dev->pci_dev->dev,
3525                                "setting real number tx failed\n");
3526                        WRITE_ONCE(sc->caller_is_done, true);
3527                        goto setup_nic_dev_free;
3528                }
3529
3530                lio = GET_LIO(netdev);
3531
3532                memset(lio, 0, sizeof(struct lio));
3533
3534                lio->ifidx = ifidx_or_pfnum;
3535
3536                props = &octeon_dev->props[i];
3537                props->gmxport = resp->cfg_info.linfo.gmxport;
3538                props->netdev = netdev;
3539
3540                lio->linfo.num_rxpciq = num_oqueues;
3541                lio->linfo.num_txpciq = num_iqueues;
3542                for (j = 0; j < num_oqueues; j++) {
3543                        lio->linfo.rxpciq[j].u64 =
3544                                resp->cfg_info.linfo.rxpciq[j].u64;
3545                }
3546                for (j = 0; j < num_iqueues; j++) {
3547                        lio->linfo.txpciq[j].u64 =
3548                                resp->cfg_info.linfo.txpciq[j].u64;
3549                }
3550                lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3551                lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3552                lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3553
3554                WRITE_ONCE(sc->caller_is_done, true);
3555
3556                lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3557
3558                if (OCTEON_CN23XX_PF(octeon_dev) ||
3559                    OCTEON_CN6XXX(octeon_dev)) {
3560                        lio->dev_capability = NETIF_F_HIGHDMA
3561                                              | NETIF_F_IP_CSUM
3562                                              | NETIF_F_IPV6_CSUM
3563                                              | NETIF_F_SG | NETIF_F_RXCSUM
3564                                              | NETIF_F_GRO
3565                                              | NETIF_F_TSO | NETIF_F_TSO6
3566                                              | NETIF_F_LRO;
3567                }
3568                netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3569
3570                /*  Copy of transmit encapsulation capabilities:
3571                 *  TSO, TSO6, Checksums for this device
3572                 */
3573                lio->enc_dev_capability = NETIF_F_IP_CSUM
3574                                          | NETIF_F_IPV6_CSUM
3575                                          | NETIF_F_GSO_UDP_TUNNEL
3576                                          | NETIF_F_HW_CSUM | NETIF_F_SG
3577                                          | NETIF_F_RXCSUM
3578                                          | NETIF_F_TSO | NETIF_F_TSO6
3579                                          | NETIF_F_LRO;
3580
3581                netdev->hw_enc_features = (lio->enc_dev_capability &
3582                                           ~NETIF_F_LRO);
3583
3584                netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3585
3586                lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3587
3588                netdev->vlan_features = lio->dev_capability;
3589                /* Add any unchangeable hw features */
3590                lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3591                                        NETIF_F_HW_VLAN_CTAG_RX |
3592                                        NETIF_F_HW_VLAN_CTAG_TX;
3593
3594                netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3595
3596                netdev->hw_features = lio->dev_capability;
3597                /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3598                netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3600
3601                /* MTU range: 68 - 16000 */
3602                netdev->min_mtu = LIO_MIN_MTU_SIZE;
3603                netdev->max_mtu = LIO_MAX_MTU_SIZE;
3604
3605                /* Point to the properties of the octeon device to which this
3606                 * interface belongs.
3607                 */
3608                lio->oct_dev = octeon_dev;
3609                lio->octprops = props;
3610                lio->netdev = netdev;
3611
3612                dev_dbg(&octeon_dev->pci_dev->dev,
3613                        "if%d gmx: %d hw_addr: 0x%llx\n", i,
3614                        lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3615
3616                for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3617                        u8 vfmac[ETH_ALEN];
3618
3619                        eth_random_addr(vfmac);
3620                        if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3621                                dev_err(&octeon_dev->pci_dev->dev,
3622                                        "Error setting VF%d MAC address\n",
3623                                        j);
3624                                goto setup_nic_dev_free;
3625                        }
3626                }
3627
3628                /* 64-bit swap required on LE machines */
3629                octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3630                for (j = 0; j < ETH_ALEN; j++)
3631                        mac[j] = ((u8 *)&lio->linfo.hw_addr)[2 + j];
3632
3633                /* Copy MAC Address to OS network device structure */
3634
3635                ether_addr_copy(netdev->dev_addr, mac);
3636
3637                /* By default, all interfaces on a single Octeon use the same
3638                 * tx and rx queues.
3639                 */
3640                lio->txq = lio->linfo.txpciq[0].s.q_no;
3641                lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3642                if (liquidio_setup_io_queues(octeon_dev, i,
3643                                             lio->linfo.num_txpciq,
3644                                             lio->linfo.num_rxpciq)) {
3645                        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3646                        goto setup_nic_dev_free;
3647                }
3648
3649                ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3650
3651                lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3652                lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3653
3654                if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3655                        dev_err(&octeon_dev->pci_dev->dev,
3656                                "Gather list allocation failed\n");
3657                        goto setup_nic_dev_free;
3658                }
3659
3660                /* Register ethtool support */
3661                liquidio_set_ethtool_ops(netdev);
3662                if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3663                        octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3664                else
3665                        octeon_dev->priv_flags = 0x0;
3666
3667                if (netdev->features & NETIF_F_LRO)
3668                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3669                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3670
3671                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3672                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
3673
3674                if ((debug != -1) && (debug & NETIF_MSG_HW))
3675                        liquidio_set_feature(netdev,
3676                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
3677
3678                if (setup_link_status_change_wq(netdev))
3679                        goto setup_nic_dev_free;
3680
3681                if ((octeon_dev->fw_info.app_cap_flags &
3682                     LIQUIDIO_TIME_SYNC_CAP) &&
3683                    setup_sync_octeon_time_wq(netdev))
3684                        goto setup_nic_dev_free;
3685
3686                if (setup_rx_oom_poll_fn(netdev))
3687                        goto setup_nic_dev_free;
3688
3689                /* Register the network device with the OS */
3690                if (register_netdev(netdev)) {
3691                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3692                        goto setup_nic_dev_free;
3693                }
3694
3695                dev_dbg(&octeon_dev->pci_dev->dev,
3696                        "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3697                        i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3698                netif_carrier_off(netdev);
3699                lio->link_changes++;
3700
3701                ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3702
3703                /* Sending command to firmware to enable Rx checksum offload
3704                 * by default at the time of setup of Liquidio driver for
3705                 * this device
3706                 */
3707                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3708                                            OCTNET_CMD_RXCSUM_ENABLE);
3709                liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3710                                     OCTNET_CMD_TXCSUM_ENABLE);
3711
3712                dev_dbg(&octeon_dev->pci_dev->dev,
3713                        "NIC ifidx:%d Setup successful\n", i);
3714
3715                if (octeon_dev->subsystem_id ==
3716                        OCTEON_CN2350_25GB_SUBSYS_ID ||
3717                    octeon_dev->subsystem_id ==
3718                        OCTEON_CN2360_25GB_SUBSYS_ID) {
3719                        cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3720                                             octeon_dev->fw_info.ver.min,
3721                                             octeon_dev->fw_info.ver.rev);
3722
3723                        /* speed control unsupported in f/w older than 1.7.2 */
3724                        if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3725                                dev_info(&octeon_dev->pci_dev->dev,
3726                                         "speed setting not supported by f/w.\n");
3727                                octeon_dev->speed_setting = 25;
3728                                octeon_dev->no_speed_setting = 1;
3729                        } else {
3730                                liquidio_get_speed(lio);
3731                        }
3732
3733                        if (octeon_dev->speed_setting == 0) {
3734                                octeon_dev->speed_setting = 25;
3735                                octeon_dev->no_speed_setting = 1;
3736                        }
3737                } else {
3738                        octeon_dev->no_speed_setting = 1;
3739                        octeon_dev->speed_setting = 10;
3740                }
3741                octeon_dev->speed_boot = octeon_dev->speed_setting;
3742
3743                /* don't read FEC setting if unsupported by f/w (see above) */
3744                if (octeon_dev->speed_boot == 25 &&
3745                    !octeon_dev->no_speed_setting) {
3746                        liquidio_get_fec(lio);
3747                        octeon_dev->props[lio->ifidx].fec_boot =
3748                                octeon_dev->props[lio->ifidx].fec;
3749                }
3750        }
3751
3752        devlink = devlink_alloc(&liquidio_devlink_ops,
3753                                sizeof(struct lio_devlink_priv),
3754                                &octeon_dev->pci_dev->dev);
3755        if (!devlink) {
3756                dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3757                goto setup_nic_dev_free;
3758        }
3759
3760        lio_devlink = devlink_priv(devlink);
3761        lio_devlink->oct = octeon_dev;
3762
3763        if (devlink_register(devlink)) {
3764                devlink_free(devlink);
3765                dev_err(&octeon_dev->pci_dev->dev,
3766                        "devlink registration failed\n");
3767                goto setup_nic_dev_free;
3768        }
3769
3770        octeon_dev->devlink = devlink;
3771        octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3772
3773        return 0;
3774
3775setup_nic_dev_free:
3776
3777        while (i--) {
3778                dev_err(&octeon_dev->pci_dev->dev,
3779                        "NIC ifidx:%d Setup failed\n", i);
3780                liquidio_destroy_nic_device(octeon_dev, i);
3781        }
3782
3783setup_nic_dev_done:
3784
3785        return -ENODEV;
3786}
3787
3788#ifdef CONFIG_PCI_IOV
3789static int octeon_enable_sriov(struct octeon_device *oct)
3790{
3791        unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3792        struct pci_dev *vfdev;
3793        int err;
3794        u32 u;
3795
3796        if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3797                err = pci_enable_sriov(oct->pci_dev,
3798                                       oct->sriov_info.num_vfs_alloced);
3799                if (err) {
3800                        dev_err(&oct->pci_dev->dev,
3801                                "OCTEON: Failed to enable PCI sriov: %d\n",
3802                                err);
3803                        oct->sriov_info.num_vfs_alloced = 0;
3804                        return err;
3805                }
3806                oct->sriov_info.sriov_enabled = 1;
3807
3808                /* init lookup table that maps DPI ring number to VF pci_dev
3809                 * struct pointer
3810                 */
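                    /* e.g. with rings_per_vf = 2, LUT slots 0, 2, 4, ...
                     * map the first ring of each VF to that VF's pci_dev.
                     */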
3811                u = 0;
3812                vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3813                                       OCTEON_CN23XX_VF_VID, NULL);
3814                while (vfdev) {
3815                        if (vfdev->is_virtfn &&
3816                            (vfdev->physfn == oct->pci_dev)) {
3817                                oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3818                                        vfdev;
3819                                u += oct->sriov_info.rings_per_vf;
3820                        }
3821                        vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3822                                               OCTEON_CN23XX_VF_VID, vfdev);
3823                }
3824        }
3825
3826        return num_vfs_alloced;
3827}
3828
3829static int lio_pci_sriov_disable(struct octeon_device *oct)
3830{
3831        int u;
3832
3833        if (pci_vfs_assigned(oct->pci_dev)) {
3834                dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3835                return -EPERM;
3836        }
3837
3838        pci_disable_sriov(oct->pci_dev);
3839
3840        u = 0;
3841        while (u < MAX_POSSIBLE_VFS) {
3842                oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3843                u += oct->sriov_info.rings_per_vf;
3844        }
3845
3846        oct->sriov_info.num_vfs_alloced = 0;
3847        dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3848                 oct->pf_num);
3849
3850        return 0;
3851}
3852
3853static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3854{
3855        struct octeon_device *oct = pci_get_drvdata(dev);
3856        int ret = 0;
3857
3858        if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3859            (oct->sriov_info.sriov_enabled)) {
3860                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3861                         oct->pf_num, num_vfs);
3862                return 0;
3863        }
3864
3865        if (!num_vfs) {
3866                lio_vf_rep_destroy(oct);
3867                ret = lio_pci_sriov_disable(oct);
3868        } else if (num_vfs > oct->sriov_info.max_vfs) {
3869                dev_err(&oct->pci_dev->dev,
3870                        "OCTEON: Max allowed VFs:%d user requested:%d\n",
3871                        oct->sriov_info.max_vfs, num_vfs);
3872                ret = -EPERM;
3873        } else {
3874                oct->sriov_info.num_vfs_alloced = num_vfs;
3875                ret = octeon_enable_sriov(oct);
                    if (ret < 0)
                            return ret;
3876                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3877                         oct->pf_num, num_vfs);
3878                ret = lio_vf_rep_create(oct);
3879                if (ret)
3880                        dev_info(&oct->pci_dev->dev,
3881                                 "vf representor create failed\n");
3882        }
3883
3884        return ret;
3885}
3886#endif
3887
3888/**
3889 * liquidio_init_nic_module - initialize the NIC
3890 * @oct: octeon device
3891 *
3892 * This initialization routine is called once the Octeon device application is
3893 * up and running
3894 */
3895static int liquidio_init_nic_module(struct octeon_device *oct)
3896{
3897        int i, retval = 0;
3898        int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3899
3900        dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3901
3902        /* Only the default iq and oq were initialized; initialize the
3903         * rest as well, and run the port_config command for each port.
3904         */
3906        oct->ifcount = num_nic_ports;
3907
3908        memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3909
3910        for (i = 0; i < MAX_OCTEON_LINKS; i++)
3911                oct->props[i].gmxport = -1;
3912
3913        retval = setup_nic_devices(oct);
3914        if (retval) {
3915                dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3916                goto octnet_init_failure;
3917        }
3918
3919        /* Call vf_rep_modinit if the firmware is switchdev capable
3920         * and do it from the first liquidio function probed.
3921         */
3922        if (!oct->octeon_id &&
3923            oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3924                retval = lio_vf_rep_modinit();
3925                if (retval) {
3926                        liquidio_stop_nic_module(oct);
3927                        goto octnet_init_failure;
3928                }
3929        }
3930
3931        liquidio_ptp_init(oct);
3932
3933        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3934
3935        return retval;
3936
3937octnet_init_failure:
3938
3939        oct->ifcount = 0;
3940
3941        return retval;
3942}
3943
3944/**
3945 * nic_starter - finish init
3946 * @work: work_struct embedded in a struct cavium_wk
3947 *
3948 * Starter callback that invokes the remaining initialization work
     * after the NIC application is up and running.
3949 */
3950static void nic_starter(struct work_struct *work)
3951{
3952        struct octeon_device *oct;
3953        struct cavium_wk *wk = (struct cavium_wk *)work;
3954
3955        oct = (struct octeon_device *)wk->ctxptr;
3956
3957        if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3958                return;
3959
3960        /* If the status of the device is CORE_OK, the core
3961         * application has reported its application type. Call
3962         * any registered handlers now and move to the RUNNING
3963         * state.
3964         */
3965        if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3966                schedule_delayed_work(&oct->nic_poll_work.work,
3967                                      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3968                return;
3969        }
3970
3971        atomic_set(&oct->status, OCT_DEV_RUNNING);
3972
3973        if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3974                dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3975
3976                if (liquidio_init_nic_module(oct))
3977                        dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3978                else
3979                        handshake[oct->octeon_id].started_ok = 1;
3980        } else {
3981                dev_err(&oct->pci_dev->dev,
3982                        "Unexpected application running on NIC (%d). Check firmware.\n",
3983                        oct->app_mode);
3984        }
3985
3986        complete(&handshake[oct->octeon_id].started);
3987}
3988
3989static int
3990octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3991{
3992        struct octeon_device *oct = (struct octeon_device *)buf;
3993        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3994        int i, notice, vf_idx;
3995        bool cores_crashed;
3996        u64 *data, vf_num;
3997
3998        notice = recv_pkt->rh.r.ossp;
3999        data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4000
4001        /* the first 64-bit word of data is the vf_num */
4002        vf_num = data[0];
4003        octeon_swap_8B_data(&vf_num, 1);
4004        vf_idx = (int)vf_num - 1;
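            /* firmware numbers VFs from 1; the driver indexes from 0 */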
4005
4006        cores_crashed = READ_ONCE(oct->cores_crashed);
4007
4008        if (notice == VF_DRV_LOADED) {
4009                if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4010                        oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4011                        dev_info(&oct->pci_dev->dev,
4012                                 "driver for VF%d was loaded\n", vf_idx);
4013                        if (!cores_crashed)
4014                                try_module_get(THIS_MODULE);
4015                }
4016        } else if (notice == VF_DRV_REMOVED) {
4017                if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4018                        oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4019                        dev_info(&oct->pci_dev->dev,
4020                                 "driver for VF%d was removed\n", vf_idx);
4021                        if (!cores_crashed)
4022                                module_put(THIS_MODULE);
4023                }
4024        } else if (notice == VF_DRV_MACADDR_CHANGED) {
4025                u8 *b = (u8 *)&data[1];
4026
4027                oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4028                dev_info(&oct->pci_dev->dev,
4029                         "VF driver changed VF%d's MAC address to %pM\n",
4030                         vf_idx, b + 2);
4031        }
4032
4033        for (i = 0; i < recv_pkt->buffer_count; i++)
4034                recv_buffer_free(recv_pkt->buffer_ptr[i]);
4035        octeon_free_recv_info(recv_info);
4036
4037        return 0;
4038}
4039
4040/**
4041 * octeon_device_init - Device initialization for each Octeon device that is probed
4042 * @octeon_dev:  octeon device
4043 */
4044static int octeon_device_init(struct octeon_device *octeon_dev)
4045{
4046        int j, ret;
4047        char bootcmd[] = "\n";
4048        char *dbg_enb = NULL;
4049        enum lio_fw_state fw_state;
4050        struct octeon_device_priv *oct_priv =
4051                (struct octeon_device_priv *)octeon_dev->priv;
4052        atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4053
4054        /* Enable access to the octeon device and make its DMA capability
4055         * known to the OS.
4056         */
4057        if (octeon_pci_os_setup(octeon_dev))
4058                return 1;
4059
4060        atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4061
4062        /* Identify the Octeon type and map the BAR address space. */
4063        if (octeon_chip_specific_setup(octeon_dev)) {
4064                dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4065                return 1;
4066        }
4067
4068        atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4069
4070        /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4071         * since that is what is required for the reference to be removed
4072         * during de-initialization (see 'octeon_destroy_resources').
4073         */
4074        octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4075                               PCI_SLOT(octeon_dev->pci_dev->devfn),
4076                               PCI_FUNC(octeon_dev->pci_dev->devfn),
4077                               true);
4078
4079        octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4080
4081        /* CN23XX supports preloaded firmware if the following is true:
4082         *
4083         * The adapter indicates that firmware is currently running AND
4084         * 'fw_type' is 'auto'.
4085         *
4086         * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4087         */
4088        if (OCTEON_CN23XX_PF(octeon_dev) &&
4089            cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4090                atomic_cmpxchg(octeon_dev->adapter_fw_state,
4091                               FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4092        }
4093
4094        /* If loading firmware, only first device of adapter needs to do so. */
4095        fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4096                                  FW_NEEDS_TO_BE_LOADED,
4097                                  FW_IS_BEING_LOADED);
4098
4099        /* Here, [local variable] 'fw_state' is set to one of:
4100         *
4101         *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4102         *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4103         *                          firmware to the adapter.
4104         *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4105         *                          firmware to the adapter.
4106         */
4107
4108        /* Prior to f/w load, perform a soft reset of the Octeon device;
4109         * if error resetting, return w/error.
4110         */
4111        if (fw_state == FW_NEEDS_TO_BE_LOADED)
4112                if (octeon_dev->fn_list.soft_reset(octeon_dev))
4113                        return 1;
4114
4115        /* Initialize the dispatch mechanism used to push packets arriving on
4116         * Octeon Output queues.
4117         */
4118        if (octeon_init_dispatch_list(octeon_dev))
4119                return 1;
4120
4121        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4122                                    OPCODE_NIC_CORE_DRV_ACTIVE,
4123                                    octeon_core_drv_init,
4124                                    octeon_dev);
4125
4126        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4127                                    OPCODE_NIC_VF_DRV_NOTICE,
4128                                    octeon_recv_vf_drv_notice, octeon_dev);
4129        INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4130        octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4131        schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4132                              LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4133
4134        atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4135
4136        if (octeon_set_io_queues_off(octeon_dev)) {
4137                dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4138                return 1;
4139        }
4140
4141        if (OCTEON_CN23XX_PF(octeon_dev)) {
4142                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4143                if (ret) {
4144                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4145                        return ret;
4146                }
4147        }
4148
4149        /* Initialize soft command buffer pool */
4151        if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4152                dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4153                return 1;
4154        }
4155        atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4156
4157        /* Setup the data structures that manage this Octeon's input queues. */
4158        if (octeon_setup_instr_queues(octeon_dev)) {
4159                dev_err(&octeon_dev->pci_dev->dev,
4160                        "instruction queue initialization failed\n");
4161                return 1;
4162        }
4163        atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4164
4165        /* Initialize lists to manage the requests of different types that
4166         * arrive from user & kernel applications for this octeon device.
4167         */
4168        if (octeon_setup_response_list(octeon_dev)) {
4169                dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4170                return 1;
4171        }
4172        atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4173
4174        if (octeon_setup_output_queues(octeon_dev)) {
4175                dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4176                return 1;
4177        }
4178
4179        atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4180
4181        if (OCTEON_CN23XX_PF(octeon_dev)) {
4182                if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4183                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4184                        return 1;
4185                }
4186                atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4187
4188                if (octeon_allocate_ioq_vector(octeon_dev,
4189                                                octeon_dev->sriov_info.num_pf_rings)) {
4191                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4192                        return 1;
4193                }
4194                atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4195
4196        } else {
4197                /* The input and output queue registers were setup earlier (the
4198                 * queues were not enabled). Any additional registers
4199                 * that need to be programmed should be done now.
4200                 */
4201                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4202                if (ret) {
4203                        dev_err(&octeon_dev->pci_dev->dev,
4204                                "Failed to configure device registers\n");
4205                        return ret;
4206                }
4207        }
4208
4209        /* Initialize the tasklet that handles output queue packet processing. */
4210        dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4211        tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4212
4213        /* Setup the interrupt handler and record the INT SUM register address. */
4215        if (octeon_setup_interrupt(octeon_dev,
4216                                   octeon_dev->sriov_info.num_pf_rings))
4217                return 1;
4218
4219        /* Enable Octeon device interrupts */
4220        octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4221
4222        atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4223
4224        /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4225         * the output queue is enabled.
4226         * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4227         * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4228         * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4229         * before any credits have been issued, causing the ring to be reset
4230         * (and the f/w appear to never have started).
4231         */
4232        for (j = 0; j < octeon_dev->num_oqs; j++)
4233                writel(octeon_dev->droq[j]->max_count,
4234                       octeon_dev->droq[j]->pkts_credit_reg);
4235
4236        /* Enable the input and output queues for this Octeon device */
4237        ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4238        if (ret) {
4239                dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4240                return ret;
4241        }
4242
4243        atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4244
4245        if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4246                dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4247                if (!ddr_timeout) {
4248                        dev_info(&octeon_dev->pci_dev->dev,
4249                                 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4250                }
4251
4252                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4253
4254                /* Wait for the octeon to initialize DDR after the soft-reset. */
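                    /* ddr_timeout is writable at runtime (e.g. via
                     * /sys/module/liquidio/parameters/ddr_timeout), so poll
                     * in 100 ms steps until it becomes non-zero.
                     */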
4255                while (!ddr_timeout) {
4256                        set_current_state(TASK_INTERRUPTIBLE);
4257                        if (schedule_timeout(HZ / 10)) {
4258                                /* user probably pressed Control-C */
4259                                return 1;
4260                        }
4261                }
4262                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4263                if (ret) {
4264                        dev_err(&octeon_dev->pci_dev->dev,
4265                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4266                                ret);
4267                        return 1;
4268                }
4269
4270                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4271                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4272                        return 1;
4273                }
4274
4275                /* Divert uboot to take commands from host instead. */
4276                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4277
4278                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4279                ret = octeon_init_consoles(octeon_dev);
4280                if (ret) {
4281                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4282                        return 1;
4283                }
4284                /* If console debug is enabled, pass an empty string to get the
4285                 * default enablement; otherwise pass NULL to leave it disabled.
4286                 */
4287                dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4288                ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4289                if (ret) {
4290                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4291                        return 1;
4292                } else if (octeon_console_debug_enabled(0)) {
4293                        /* If console was added AND we're logging console output
4294                         * then set our console print function.
4295                         */
4296                        octeon_dev->console[0].print = octeon_dbg_console_print;
4297                }
4298
4299                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4300
4301                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4302                ret = load_firmware(octeon_dev);
4303                if (ret) {
4304                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4305                        return 1;
4306                }
4307
4308                atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4309        }
4310
4311        handshake[octeon_dev->octeon_id].init_ok = 1;
4312        complete(&handshake[octeon_dev->octeon_id].init);
4313
4314        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4315        oct_priv->dev = octeon_dev;
4316
4317        return 0;
4318}
4319
4320/**
4321 * octeon_dbg_console_print - Debug console print function
4322 * @oct:  octeon device
4323 * @console_num: console number
4324 * @prefix:      first portion of line to display
4325 * @suffix:      second portion of line to display
4326 *
4327 * The OCTEON debug console outputs entire lines (excluding '\n').
4328 * Normally, the line will be passed in the 'prefix' parameter.
4329 * However, due to buffering, it is possible for a line to be split into two
4330 * parts, in which case they will be passed as the 'prefix' parameter and
4331 * 'suffix' parameter.
4332 */
4333static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4334                                    char *prefix, char *suffix)
4335{
4336        if (prefix && suffix)
4337                dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4338                         suffix);
4339        else if (prefix)
4340                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4341        else if (suffix)
4342                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4343
4344        return 0;
4345}
4346
4347/**
4348 * liquidio_exit - Exits the module
4349 */
4350static void __exit liquidio_exit(void)
4351{
4352        liquidio_deinit_pci();
4353
4354        pr_info("LiquidIO network module is now unloaded\n");
4355}
4356
4357module_init(liquidio_init);
4358module_exit(liquidio_exit);
4359