linux/drivers/net/ethernet/cavium/liquidio/lio_main.c
<<
>>
Prefs
   1/**********************************************************************
   2 * Author: Cavium, Inc.
   3 *
   4 * Contact: support@cavium.com
   5 *          Please include "LiquidIO" in the subject.
   6 *
   7 * Copyright (c) 2003-2016 Cavium, Inc.
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17 ***********************************************************************/
  18#include <linux/module.h>
  19#include <linux/interrupt.h>
  20#include <linux/pci.h>
  21#include <linux/firmware.h>
  22#include <net/vxlan.h>
  23#include <linux/kthread.h>
  24#include "liquidio_common.h"
  25#include "octeon_droq.h"
  26#include "octeon_iq.h"
  27#include "response_manager.h"
  28#include "octeon_device.h"
  29#include "octeon_nic.h"
  30#include "octeon_main.h"
  31#include "octeon_network.h"
  32#include "cn66xx_regs.h"
  33#include "cn66xx_device.h"
  34#include "cn68xx_device.h"
  35#include "cn23xx_pf_device.h"
  36#include "liquidio_image.h"
  37#include "lio_vf_rep.h"
  38
  39MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
  40MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
  41MODULE_LICENSE("GPL");
  42MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
  43                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
  44MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
  45                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
  46MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
  47                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
  48MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
  49                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
  50
  51static int ddr_timeout = 10000;
  52module_param(ddr_timeout, int, 0644);
  53MODULE_PARM_DESC(ddr_timeout,
  54                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
  55
  56#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
  57
  58static int debug = -1;
  59module_param(debug, int, 0644);
  60MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
  61
  62static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
  63module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
  64MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
  65
  66static u32 console_bitmask;
  67module_param(console_bitmask, int, 0644);
  68MODULE_PARM_DESC(console_bitmask,
  69                 "Bitmask indicating which consoles have debug output redirected to syslog.");
  70
  71/**
  72 * \brief determines if a given console has debug enabled.
  73 * @param console console to check
  74 * @returns  1 = enabled. 0 otherwise
  75 */
  76static int octeon_console_debug_enabled(u32 console)
  77{
  78        return (console_bitmask >> (console)) & 0x1;
  79}
  80
  81/* Polling interval for determining when NIC application is alive */
  82#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
  83
  84/* runtime link query interval */
  85#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
  86/* update localtime to octeon firmware every 60 seconds.
  87 * make firmware to use same time reference, so that it will be easy to
  88 * correlate firmware logged events/errors with host events, for debugging.
  89 */
  90#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
  91
  92/* time to wait for possible in-flight requests in milliseconds */
  93#define WAIT_INFLIGHT_REQUEST   msecs_to_jiffies(1000)
  94
  95struct lio_trusted_vf_ctx {
  96        struct completion complete;
  97        int status;
  98};
  99
 100struct oct_link_status_resp {
 101        u64 rh;
 102        struct oct_link_info link_info;
 103        u64 status;
 104};
 105
 106struct oct_timestamp_resp {
 107        u64 rh;
 108        u64 timestamp;
 109        u64 status;
 110};
 111
 112#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
 113
 114union tx_info {
 115        u64 u64;
 116        struct {
 117#ifdef __BIG_ENDIAN_BITFIELD
 118                u16 gso_size;
 119                u16 gso_segs;
 120                u32 reserved;
 121#else
 122                u32 reserved;
 123                u16 gso_segs;
 124                u16 gso_size;
 125#endif
 126        } s;
 127};
 128
 129/** Octeon device properties to be used by the NIC module.
 130 * Each octeon device in the system will be represented
 131 * by this structure in the NIC module.
 132 */
 133
 134#define OCTNIC_GSO_MAX_HEADER_SIZE 128
 135#define OCTNIC_GSO_MAX_SIZE                                                    \
 136        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
 137
 138struct handshake {
 139        struct completion init;
 140        struct completion started;
 141        struct pci_dev *pci_dev;
 142        int init_ok;
 143        int started_ok;
 144};
 145
 146#ifdef CONFIG_PCI_IOV
 147static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
 148#endif
 149
 150static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
 151                                    char *prefix, char *suffix);
 152
 153static int octeon_device_init(struct octeon_device *);
 154static int liquidio_stop(struct net_device *netdev);
 155static void liquidio_remove(struct pci_dev *pdev);
 156static int liquidio_probe(struct pci_dev *pdev,
 157                          const struct pci_device_id *ent);
 158static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
 159                                      int linkstate);
 160
 161static struct handshake handshake[MAX_OCTEON_DEVICES];
 162static struct completion first_stage;
 163
 164static void octeon_droq_bh(unsigned long pdev)
 165{
 166        int q_no;
 167        int reschedule = 0;
 168        struct octeon_device *oct = (struct octeon_device *)pdev;
 169        struct octeon_device_priv *oct_priv =
 170                (struct octeon_device_priv *)oct->priv;
 171
 172        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
 173                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
 174                        continue;
 175                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
 176                                                          MAX_PACKET_BUDGET);
 177                lio_enable_irq(oct->droq[q_no], NULL);
 178
 179                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
 180                        /* set time and cnt interrupt thresholds for this DROQ
 181                         * for NAPI
 182                         */
 183                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
 184
 185                        octeon_write_csr64(
 186                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
 187                            0x5700000040ULL);
 188                        octeon_write_csr64(
 189                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
 190                }
 191        }
 192
 193        if (reschedule)
 194                tasklet_schedule(&oct_priv->droq_tasklet);
 195}
 196
 197static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 198{
 199        struct octeon_device_priv *oct_priv =
 200                (struct octeon_device_priv *)oct->priv;
 201        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
 202        int i;
 203
 204        do {
 205                pending_pkts = 0;
 206
 207                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
 208                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
 209                                continue;
 210                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
 211                }
 212                if (pkt_cnt > 0) {
 213                        pending_pkts += pkt_cnt;
 214                        tasklet_schedule(&oct_priv->droq_tasklet);
 215                }
 216                pkt_cnt = 0;
 217                schedule_timeout_uninterruptible(1);
 218
 219        } while (retry-- && pending_pkts);
 220
 221        return pkt_cnt;
 222}
 223
 224/**
 225 * \brief Forces all IO queues off on a given device
 226 * @param oct Pointer to Octeon device
 227 */
 228static void force_io_queues_off(struct octeon_device *oct)
 229{
 230        if ((oct->chip_id == OCTEON_CN66XX) ||
 231            (oct->chip_id == OCTEON_CN68XX)) {
 232                /* Reset the Enable bits for Input Queues. */
 233                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
 234
 235                /* Reset the Enable bits for Output Queues. */
 236                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
 237        }
 238}
 239
 240/**
 241 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 242 * @param oct Pointer to Octeon device
 243 */
 244static inline void pcierror_quiesce_device(struct octeon_device *oct)
 245{
 246        int i;
 247
 248        /* Disable the input and output queues now. No more packets will
 249         * arrive from Octeon, but we should wait for all packet processing
 250         * to finish.
 251         */
 252        force_io_queues_off(oct);
 253
 254        /* To allow for in-flight requests */
 255        schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
 256
 257        if (wait_for_pending_requests(oct))
 258                dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 259
 260        /* Force all requests waiting to be fetched by OCTEON to complete. */
 261        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 262                struct octeon_instr_queue *iq;
 263
 264                if (!(oct->io_qmask.iq & BIT_ULL(i)))
 265                        continue;
 266                iq = oct->instr_queue[i];
 267
 268                if (atomic_read(&iq->instr_pending)) {
 269                        spin_lock_bh(&iq->lock);
 270                        iq->fill_cnt = 0;
 271                        iq->octeon_read_index = iq->host_write_index;
 272                        iq->stats.instr_processed +=
 273                                atomic_read(&iq->instr_pending);
 274                        lio_process_iq_request_list(oct, iq, 0);
 275                        spin_unlock_bh(&iq->lock);
 276                }
 277        }
 278
 279        /* Force all pending ordered list requests to time out. */
 280        lio_process_ordered_list(oct, 1);
 281
 282        /* We do not need to wait for output queue packets to be processed. */
 283}
 284
 285/**
 286 * \brief Cleanup PCI AER uncorrectable error status
 287 * @param dev Pointer to PCI device
 288 */
 289static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 290{
 291        int pos = 0x100;
 292        u32 status, mask;
 293
 294        pr_info("%s :\n", __func__);
 295
 296        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
 297        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
 298        if (dev->error_state == pci_channel_io_normal)
 299                status &= ~mask;        /* Clear corresponding nonfatal bits */
 300        else
 301                status &= mask;         /* Clear corresponding fatal bits */
 302        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
 303}
 304
 305/**
 306 * \brief Stop all PCI IO to a given device
 307 * @param dev Pointer to Octeon device
 308 */
 309static void stop_pci_io(struct octeon_device *oct)
 310{
 311        /* No more instructions will be forwarded. */
 312        atomic_set(&oct->status, OCT_DEV_IN_RESET);
 313
 314        pci_disable_device(oct->pci_dev);
 315
 316        /* Disable interrupts  */
 317        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 318
 319        pcierror_quiesce_device(oct);
 320
 321        /* Release the interrupt line */
 322        free_irq(oct->pci_dev->irq, oct);
 323
 324        if (oct->flags & LIO_FLAG_MSI_ENABLED)
 325                pci_disable_msi(oct->pci_dev);
 326
 327        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 328                lio_get_state_string(&oct->status));
 329
 330        /* making it a common function for all OCTEON models */
 331        cleanup_aer_uncorrect_error_status(oct->pci_dev);
 332}
 333
 334/**
 335 * \brief called when PCI error is detected
 336 * @param pdev Pointer to PCI device
 337 * @param state The current pci connection state
 338 *
 339 * This function is called after a PCI bus error affecting
 340 * this device has been detected.
 341 */
 342static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
 343                                                     pci_channel_state_t state)
 344{
 345        struct octeon_device *oct = pci_get_drvdata(pdev);
 346
 347        /* Non-correctable Non-fatal errors */
 348        if (state == pci_channel_io_normal) {
 349                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
 350                cleanup_aer_uncorrect_error_status(oct->pci_dev);
 351                return PCI_ERS_RESULT_CAN_RECOVER;
 352        }
 353
 354        /* Non-correctable Fatal errors */
 355        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
 356        stop_pci_io(oct);
 357
 358        /* Always return a DISCONNECT. There is no support for recovery but only
 359         * for a clean shutdown.
 360         */
 361        return PCI_ERS_RESULT_DISCONNECT;
 362}
 363
 364/**
 365 * \brief mmio handler
 366 * @param pdev Pointer to PCI device
 367 */
 368static pci_ers_result_t liquidio_pcie_mmio_enabled(
 369                                struct pci_dev *pdev __attribute__((unused)))
 370{
 371        /* We should never hit this since we never ask for a reset for a Fatal
 372         * Error. We always return DISCONNECT in io_error above.
 373         * But play safe and return RECOVERED for now.
 374         */
 375        return PCI_ERS_RESULT_RECOVERED;
 376}
 377
 378/**
 379 * \brief called after the pci bus has been reset.
 380 * @param pdev Pointer to PCI device
 381 *
 382 * Restart the card from scratch, as if from a cold-boot. Implementation
 383 * resembles the first-half of the octeon_resume routine.
 384 */
 385static pci_ers_result_t liquidio_pcie_slot_reset(
 386                                struct pci_dev *pdev __attribute__((unused)))
 387{
 388        /* We should never hit this since we never ask for a reset for a Fatal
 389         * Error. We always return DISCONNECT in io_error above.
 390         * But play safe and return RECOVERED for now.
 391         */
 392        return PCI_ERS_RESULT_RECOVERED;
 393}
 394
 395/**
 396 * \brief called when traffic can start flowing again.
 397 * @param pdev Pointer to PCI device
 398 *
 399 * This callback is called when the error recovery driver tells us that
 400 * its OK to resume normal operation. Implementation resembles the
 401 * second-half of the octeon_resume routine.
 402 */
 403static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
 404{
 405        /* Nothing to be done here. */
 406}
 407
 408#define liquidio_suspend NULL
 409#define liquidio_resume NULL
 410
 411/* For PCI-E Advanced Error Recovery (AER) Interface */
 412static const struct pci_error_handlers liquidio_err_handler = {
 413        .error_detected = liquidio_pcie_error_detected,
 414        .mmio_enabled   = liquidio_pcie_mmio_enabled,
 415        .slot_reset     = liquidio_pcie_slot_reset,
 416        .resume         = liquidio_pcie_resume,
 417};
 418
 419static const struct pci_device_id liquidio_pci_tbl[] = {
 420        {       /* 68xx */
 421                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 422        },
 423        {       /* 66xx */
 424                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 425        },
 426        {       /* 23xx pf */
 427                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
 428        },
 429        {
 430                0, 0, 0, 0, 0, 0, 0
 431        }
 432};
 433MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
 434
 435static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
 436
 437static struct pci_driver liquidio_pci_driver = {
 438        .name           = "LiquidIO",
 439        .id_table       = liquidio_pci_tbl,
 440        .probe          = liquidio_probe,
 441        .remove         = liquidio_remove,
 442        .err_handler    = &liquidio_err_handler,    /* For AER */
 443        .driver.pm      = &liquidio_pm_ops,
 444#ifdef CONFIG_PCI_IOV
 445        .sriov_configure = liquidio_enable_sriov,
 446#endif
 447};
 448
 449/**
 450 * \brief register PCI driver
 451 */
 452static int liquidio_init_pci(void)
 453{
 454        return pci_register_driver(&liquidio_pci_driver);
 455}
 456
 457/**
 458 * \brief unregister PCI driver
 459 */
 460static void liquidio_deinit_pci(void)
 461{
 462        pci_unregister_driver(&liquidio_pci_driver);
 463}
 464
 465/**
 466 * \brief Check Tx queue status, and take appropriate action
 467 * @param lio per-network private data
 468 * @returns 0 if full, number of queues woken up otherwise
 469 */
 470static inline int check_txq_status(struct lio *lio)
 471{
 472        int numqs = lio->netdev->real_num_tx_queues;
 473        int ret_val = 0;
 474        int q, iq;
 475
 476        /* check each sub-queue state */
 477        for (q = 0; q < numqs; q++) {
 478                iq = lio->linfo.txpciq[q %
 479                        lio->oct_dev->num_iqs].s.q_no;
 480                if (octnet_iq_is_full(lio->oct_dev, iq))
 481                        continue;
 482                if (__netif_subqueue_stopped(lio->netdev, q)) {
 483                        netif_wake_subqueue(lio->netdev, q);
 484                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
 485                                                  tx_restart, 1);
 486                        ret_val++;
 487                }
 488        }
 489
 490        return ret_val;
 491}
 492
 493/**
 494 * \brief Print link information
 495 * @param netdev network device
 496 */
 497static void print_link_info(struct net_device *netdev)
 498{
 499        struct lio *lio = GET_LIO(netdev);
 500
 501        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
 502            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
 503                struct oct_link_info *linfo = &lio->linfo;
 504
 505                if (linfo->link.s.link_up) {
 506                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
 507                                   linfo->link.s.speed,
 508                                   (linfo->link.s.duplex) ? "Full" : "Half");
 509                } else {
 510                        netif_info(lio, link, lio->netdev, "Link Down\n");
 511                }
 512        }
 513}
 514
 515/**
 516 * \brief Routine to notify MTU change
 517 * @param work work_struct data structure
 518 */
 519static void octnet_link_status_change(struct work_struct *work)
 520{
 521        struct cavium_wk *wk = (struct cavium_wk *)work;
 522        struct lio *lio = (struct lio *)wk->ctxptr;
 523
 524        /* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
 525         * this API is invoked only when new max-MTU of the interface is
 526         * less than current MTU.
 527         */
 528        rtnl_lock();
 529        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
 530        rtnl_unlock();
 531}
 532
 533/**
 534 * \brief Sets up the mtu status change work
 535 * @param netdev network device
 536 */
 537static inline int setup_link_status_change_wq(struct net_device *netdev)
 538{
 539        struct lio *lio = GET_LIO(netdev);
 540        struct octeon_device *oct = lio->oct_dev;
 541
 542        lio->link_status_wq.wq = alloc_workqueue("link-status",
 543                                                 WQ_MEM_RECLAIM, 0);
 544        if (!lio->link_status_wq.wq) {
 545                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
 546                return -1;
 547        }
 548        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
 549                          octnet_link_status_change);
 550        lio->link_status_wq.wk.ctxptr = lio;
 551
 552        return 0;
 553}
 554
 555static inline void cleanup_link_status_change_wq(struct net_device *netdev)
 556{
 557        struct lio *lio = GET_LIO(netdev);
 558
 559        if (lio->link_status_wq.wq) {
 560                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
 561                destroy_workqueue(lio->link_status_wq.wq);
 562        }
 563}
 564
 565/**
 566 * \brief Update link status
 567 * @param netdev network device
 568 * @param ls link status structure
 569 *
 570 * Called on receipt of a link status response from the core application to
 571 * update each interface's link status.
 572 */
 573static inline void update_link_status(struct net_device *netdev,
 574                                      union oct_link_status *ls)
 575{
 576        struct lio *lio = GET_LIO(netdev);
 577        int changed = (lio->linfo.link.u64 != ls->u64);
 578        int current_max_mtu = lio->linfo.link.s.mtu;
 579        struct octeon_device *oct = lio->oct_dev;
 580
 581        dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
 582                __func__, lio->linfo.link.u64, ls->u64);
 583        lio->linfo.link.u64 = ls->u64;
 584
 585        if ((lio->intf_open) && (changed)) {
 586                print_link_info(netdev);
 587                lio->link_changes++;
 588
 589                if (lio->linfo.link.s.link_up) {
 590                        dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
 591                        netif_carrier_on(netdev);
 592                        wake_txqs(netdev);
 593                } else {
 594                        dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
 595                        netif_carrier_off(netdev);
 596                        stop_txqs(netdev);
 597                }
 598                if (lio->linfo.link.s.mtu != current_max_mtu) {
 599                        netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
 600                                   current_max_mtu, lio->linfo.link.s.mtu);
 601                        netdev->max_mtu = lio->linfo.link.s.mtu;
 602                }
 603                if (lio->linfo.link.s.mtu < netdev->mtu) {
 604                        dev_warn(&oct->pci_dev->dev,
 605                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
 606                                     netdev->mtu, lio->linfo.link.s.mtu);
 607                        queue_delayed_work(lio->link_status_wq.wq,
 608                                           &lio->link_status_wq.wk.work, 0);
 609                }
 610        }
 611}
 612
 613/**
 614 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 615 * firmware will correct it's time, in case there is a time skew
 616 *
 617 * @work: work scheduled to send time update to octeon firmware
 618 **/
 619static void lio_sync_octeon_time(struct work_struct *work)
 620{
 621        struct cavium_wk *wk = (struct cavium_wk *)work;
 622        struct lio *lio = (struct lio *)wk->ctxptr;
 623        struct octeon_device *oct = lio->oct_dev;
 624        struct octeon_soft_command *sc;
 625        struct timespec64 ts;
 626        struct lio_time *lt;
 627        int ret;
 628
 629        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
 630        if (!sc) {
 631                dev_err(&oct->pci_dev->dev,
 632                        "Failed to sync time to octeon: soft command allocation failed\n");
 633                return;
 634        }
 635
 636        lt = (struct lio_time *)sc->virtdptr;
 637
 638        /* Get time of the day */
 639        ktime_get_real_ts64(&ts);
 640        lt->sec = ts.tv_sec;
 641        lt->nsec = ts.tv_nsec;
 642        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
 643
 644        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
 645        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 646                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
 647
 648        init_completion(&sc->complete);
 649        sc->sc_status = OCTEON_REQUEST_PENDING;
 650
 651        ret = octeon_send_soft_command(oct, sc);
 652        if (ret == IQ_SEND_FAILED) {
 653                dev_err(&oct->pci_dev->dev,
 654                        "Failed to sync time to octeon: failed to send soft command\n");
 655                octeon_free_soft_command(oct, sc);
 656        } else {
 657                WRITE_ONCE(sc->caller_is_done, true);
 658        }
 659
 660        queue_delayed_work(lio->sync_octeon_time_wq.wq,
 661                           &lio->sync_octeon_time_wq.wk.work,
 662                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
 663}
 664
 665/**
 666 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 667 * local time to octeon firmware
 668 *
 669 * @netdev - network device which should send time update to firmware
 670 **/
 671static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
 672{
 673        struct lio *lio = GET_LIO(netdev);
 674        struct octeon_device *oct = lio->oct_dev;
 675
 676        lio->sync_octeon_time_wq.wq =
 677                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
 678        if (!lio->sync_octeon_time_wq.wq) {
 679                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
 680                return -1;
 681        }
 682        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
 683                          lio_sync_octeon_time);
 684        lio->sync_octeon_time_wq.wk.ctxptr = lio;
 685        queue_delayed_work(lio->sync_octeon_time_wq.wq,
 686                           &lio->sync_octeon_time_wq.wk.work,
 687                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
 688
 689        return 0;
 690}
 691
 692/**
 693 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 694 * to periodically update local time to octeon firmware
 695 *
 696 * @netdev - network device which should send time update to firmware
 697 **/
 698static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
 699{
 700        struct lio *lio = GET_LIO(netdev);
 701        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
 702
 703        if (time_wq->wq) {
 704                cancel_delayed_work_sync(&time_wq->wk.work);
 705                destroy_workqueue(time_wq->wq);
 706        }
 707}
 708
 709static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
 710{
 711        struct octeon_device *other_oct;
 712
 713        other_oct = lio_get_device(oct->octeon_id + 1);
 714
 715        if (other_oct && other_oct->pci_dev) {
 716                int oct_busnum, other_oct_busnum;
 717
 718                oct_busnum = oct->pci_dev->bus->number;
 719                other_oct_busnum = other_oct->pci_dev->bus->number;
 720
 721                if (oct_busnum == other_oct_busnum) {
 722                        int oct_slot, other_oct_slot;
 723
 724                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
 725                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
 726
 727                        if (oct_slot == other_oct_slot)
 728                                return other_oct;
 729                }
 730        }
 731
 732        return NULL;
 733}
 734
 735static void disable_all_vf_links(struct octeon_device *oct)
 736{
 737        struct net_device *netdev;
 738        int max_vfs, vf, i;
 739
 740        if (!oct)
 741                return;
 742
 743        max_vfs = oct->sriov_info.max_vfs;
 744
 745        for (i = 0; i < oct->ifcount; i++) {
 746                netdev = oct->props[i].netdev;
 747                if (!netdev)
 748                        continue;
 749
 750                for (vf = 0; vf < max_vfs; vf++)
 751                        liquidio_set_vf_link_state(netdev, vf,
 752                                                   IFLA_VF_LINK_STATE_DISABLE);
 753        }
 754}
 755
 756static int liquidio_watchdog(void *param)
 757{
 758        bool err_msg_was_printed[LIO_MAX_CORES];
 759        u16 mask_of_crashed_or_stuck_cores = 0;
 760        bool all_vf_links_are_disabled = false;
 761        struct octeon_device *oct = param;
 762        struct octeon_device *other_oct;
 763#ifdef CONFIG_MODULE_UNLOAD
 764        long refcount, vfs_referencing_pf;
 765        u64 vfs_mask1, vfs_mask2;
 766#endif
 767        int core;
 768
 769        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
 770
 771        while (!kthread_should_stop()) {
 772                /* sleep for a couple of seconds so that we don't hog the CPU */
 773                set_current_state(TASK_INTERRUPTIBLE);
 774                schedule_timeout(msecs_to_jiffies(2000));
 775
 776                mask_of_crashed_or_stuck_cores =
 777                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
 778
 779                if (!mask_of_crashed_or_stuck_cores)
 780                        continue;
 781
 782                WRITE_ONCE(oct->cores_crashed, true);
 783                other_oct = get_other_octeon_device(oct);
 784                if (other_oct)
 785                        WRITE_ONCE(other_oct->cores_crashed, true);
 786
 787                for (core = 0; core < LIO_MAX_CORES; core++) {
 788                        bool core_crashed_or_got_stuck;
 789
 790                        core_crashed_or_got_stuck =
 791                                                (mask_of_crashed_or_stuck_cores
 792                                                 >> core) & 1;
 793
 794                        if (core_crashed_or_got_stuck &&
 795                            !err_msg_was_printed[core]) {
 796                                dev_err(&oct->pci_dev->dev,
 797                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
 798                                        core);
 799                                err_msg_was_printed[core] = true;
 800                        }
 801                }
 802
 803                if (all_vf_links_are_disabled)
 804                        continue;
 805
 806                disable_all_vf_links(oct);
 807                disable_all_vf_links(other_oct);
 808                all_vf_links_are_disabled = true;
 809
 810#ifdef CONFIG_MODULE_UNLOAD
 811                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
 812                vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
 813
 814                vfs_referencing_pf  = hweight64(vfs_mask1);
 815                vfs_referencing_pf += hweight64(vfs_mask2);
 816
 817                refcount = module_refcount(THIS_MODULE);
 818                if (refcount >= vfs_referencing_pf) {
 819                        while (vfs_referencing_pf) {
 820                                module_put(THIS_MODULE);
 821                                vfs_referencing_pf--;
 822                        }
 823                }
 824#endif
 825        }
 826
 827        return 0;
 828}
 829
 830/**
 831 * \brief PCI probe handler
 832 * @param pdev PCI device structure
 833 * @param ent unused
 834 */
 835static int
 836liquidio_probe(struct pci_dev *pdev,
 837               const struct pci_device_id *ent __attribute__((unused)))
 838{
 839        struct octeon_device *oct_dev = NULL;
 840        struct handshake *hs;
 841
 842        oct_dev = octeon_allocate_device(pdev->device,
 843                                         sizeof(struct octeon_device_priv));
 844        if (!oct_dev) {
 845                dev_err(&pdev->dev, "Unable to allocate device\n");
 846                return -ENOMEM;
 847        }
 848
 849        if (pdev->device == OCTEON_CN23XX_PF_VID)
 850                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
 851
 852        /* Enable PTP for 6XXX Device */
 853        if (((pdev->device == OCTEON_CN66XX) ||
 854             (pdev->device == OCTEON_CN68XX)))
 855                oct_dev->ptp_enable = true;
 856        else
 857                oct_dev->ptp_enable = false;
 858
 859        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
 860                 (u32)pdev->vendor, (u32)pdev->device);
 861
 862        /* Assign octeon_device for this device to the private data area. */
 863        pci_set_drvdata(pdev, oct_dev);
 864
 865        /* set linux specific device pointer */
 866        oct_dev->pci_dev = (void *)pdev;
 867
 868        oct_dev->subsystem_id = pdev->subsystem_vendor |
 869                (pdev->subsystem_device << 16);
 870
 871        hs = &handshake[oct_dev->octeon_id];
 872        init_completion(&hs->init);
 873        init_completion(&hs->started);
 874        hs->pci_dev = pdev;
 875
 876        if (oct_dev->octeon_id == 0)
 877                /* first LiquidIO NIC is detected */
 878                complete(&first_stage);
 879
 880        if (octeon_device_init(oct_dev)) {
 881                complete(&hs->init);
 882                liquidio_remove(pdev);
 883                return -ENOMEM;
 884        }
 885
 886        if (OCTEON_CN23XX_PF(oct_dev)) {
 887                u8 bus, device, function;
 888
 889                if (atomic_read(oct_dev->adapter_refcount) == 1) {
 890                        /* Each NIC gets one watchdog kernel thread.  The first
 891                         * PF (of each NIC) that gets pci_driver->probe()'d
 892                         * creates that thread.
 893                         */
 894                        bus = pdev->bus->number;
 895                        device = PCI_SLOT(pdev->devfn);
 896                        function = PCI_FUNC(pdev->devfn);
 897                        oct_dev->watchdog_task = kthread_create(
 898                            liquidio_watchdog, oct_dev,
 899                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
 900                        if (!IS_ERR(oct_dev->watchdog_task)) {
 901                                wake_up_process(oct_dev->watchdog_task);
 902                        } else {
 903                                oct_dev->watchdog_task = NULL;
 904                                dev_err(&oct_dev->pci_dev->dev,
 905                                        "failed to create kernel_thread\n");
 906                                liquidio_remove(pdev);
 907                                return -1;
 908                        }
 909                }
 910        }
 911
 912        oct_dev->rx_pause = 1;
 913        oct_dev->tx_pause = 1;
 914
 915        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
 916
 917        return 0;
 918}
 919
 920static bool fw_type_is_auto(void)
 921{
 922        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
 923                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
 924}
 925
 926/**
 927 * \brief PCI FLR for each Octeon device.
 928 * @param oct octeon device
 929 */
 930static void octeon_pci_flr(struct octeon_device *oct)
 931{
 932        int rc;
 933
 934        pci_save_state(oct->pci_dev);
 935
 936        pci_cfg_access_lock(oct->pci_dev);
 937
 938        /* Quiesce the device completely */
 939        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
 940                              PCI_COMMAND_INTX_DISABLE);
 941
 942        rc = __pci_reset_function_locked(oct->pci_dev);
 943
 944        if (rc != 0)
 945                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
 946                        rc, oct->pf_num);
 947
 948        pci_cfg_access_unlock(oct->pci_dev);
 949
 950        pci_restore_state(oct->pci_dev);
 951}
 952
 953/**
 954 *\brief Destroy resources associated with octeon device
 955 * @param pdev PCI device structure
 956 * @param ent unused
 957 */
 958static void octeon_destroy_resources(struct octeon_device *oct)
 959{
 960        int i, refcount;
 961        struct msix_entry *msix_entries;
 962        struct octeon_device_priv *oct_priv =
 963                (struct octeon_device_priv *)oct->priv;
 964
 965        struct handshake *hs;
 966
 967        switch (atomic_read(&oct->status)) {
 968        case OCT_DEV_RUNNING:
 969        case OCT_DEV_CORE_OK:
 970
 971                /* No more instructions will be forwarded. */
 972                atomic_set(&oct->status, OCT_DEV_IN_RESET);
 973
 974                oct->app_mode = CVM_DRV_INVALID_APP;
 975                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 976                        lio_get_state_string(&oct->status));
 977
 978                schedule_timeout_uninterruptible(HZ / 10);
 979
 980                fallthrough;
 981        case OCT_DEV_HOST_OK:
 982
 983        case OCT_DEV_CONSOLE_INIT_DONE:
 984                /* Remove any consoles */
 985                octeon_remove_consoles(oct);
 986
 987                fallthrough;
 988        case OCT_DEV_IO_QUEUES_DONE:
 989                if (lio_wait_for_instr_fetch(oct))
 990                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 991
 992                if (wait_for_pending_requests(oct))
 993                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 994
 995                /* Disable the input and output queues now. No more packets will
 996                 * arrive from Octeon, but we should wait for all packet
 997                 * processing to finish.
 998                 */
 999                oct->fn_list.disable_io_queues(oct);
1000
1001                if (lio_wait_for_oq_pkts(oct))
1002                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1003
1004                /* Force all requests waiting to be fetched by OCTEON to
1005                 * complete.
1006                 */
1007                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1008                        struct octeon_instr_queue *iq;
1009
1010                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
1011                                continue;
1012                        iq = oct->instr_queue[i];
1013
1014                        if (atomic_read(&iq->instr_pending)) {
1015                                spin_lock_bh(&iq->lock);
1016                                iq->fill_cnt = 0;
1017                                iq->octeon_read_index = iq->host_write_index;
1018                                iq->stats.instr_processed +=
1019                                        atomic_read(&iq->instr_pending);
1020                                lio_process_iq_request_list(oct, iq, 0);
1021                                spin_unlock_bh(&iq->lock);
1022                        }
1023                }
1024
1025                lio_process_ordered_list(oct, 1);
1026                octeon_free_sc_done_list(oct);
1027                octeon_free_sc_zombie_list(oct);
1028
1029                fallthrough;
1030        case OCT_DEV_INTR_SET_DONE:
1031                /* Disable interrupts  */
1032                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1033
1034                if (oct->msix_on) {
1035                        msix_entries = (struct msix_entry *)oct->msix_entries;
1036                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1037                                if (oct->ioq_vector[i].vector) {
1038                                        /* clear the affinity_cpumask */
1039                                        irq_set_affinity_hint(
1040                                                        msix_entries[i].vector,
1041                                                        NULL);
1042                                        free_irq(msix_entries[i].vector,
1043                                                 &oct->ioq_vector[i]);
1044                                        oct->ioq_vector[i].vector = 0;
1045                                }
1046                        }
1047                        /* non-iov vector's argument is oct struct */
1048                        free_irq(msix_entries[i].vector, oct);
1049
1050                        pci_disable_msix(oct->pci_dev);
1051                        kfree(oct->msix_entries);
1052                        oct->msix_entries = NULL;
1053                } else {
1054                        /* Release the interrupt line */
1055                        free_irq(oct->pci_dev->irq, oct);
1056
1057                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
1058                                pci_disable_msi(oct->pci_dev);
1059                }
1060
1061                kfree(oct->irq_name_storage);
1062                oct->irq_name_storage = NULL;
1063
1064                fallthrough;
1065        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1066                if (OCTEON_CN23XX_PF(oct))
1067                        octeon_free_ioq_vector(oct);
1068
1069                fallthrough;
1070        case OCT_DEV_MBOX_SETUP_DONE:
1071                if (OCTEON_CN23XX_PF(oct))
1072                        oct->fn_list.free_mbox(oct);
1073
1074                fallthrough;
1075        case OCT_DEV_IN_RESET:
1076        case OCT_DEV_DROQ_INIT_DONE:
1077                /* Wait for any pending operations */
1078                mdelay(100);
1079                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1080                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
1081                                continue;
1082                        octeon_delete_droq(oct, i);
1083                }
1084
1085                /* Force any pending handshakes to complete */
1086                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1087                        hs = &handshake[i];
1088
1089                        if (hs->pci_dev) {
1090                                handshake[oct->octeon_id].init_ok = 0;
1091                                complete(&handshake[oct->octeon_id].init);
1092                                handshake[oct->octeon_id].started_ok = 0;
1093                                complete(&handshake[oct->octeon_id].started);
1094                        }
1095                }
1096
1097                fallthrough;
1098        case OCT_DEV_RESP_LIST_INIT_DONE:
1099                octeon_delete_response_list(oct);
1100
1101                fallthrough;
1102        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1103                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1104                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
1105                                continue;
1106                        octeon_delete_instr_queue(oct, i);
1107                }
1108#ifdef CONFIG_PCI_IOV
1109                if (oct->sriov_info.sriov_enabled)
1110                        pci_disable_sriov(oct->pci_dev);
1111#endif
1112                fallthrough;
1113        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1114                octeon_free_sc_buffer_pool(oct);
1115
1116                fallthrough;
1117        case OCT_DEV_DISPATCH_INIT_DONE:
1118                octeon_delete_dispatch_list(oct);
1119                cancel_delayed_work_sync(&oct->nic_poll_work.work);
1120
1121                fallthrough;
1122        case OCT_DEV_PCI_MAP_DONE:
1123                refcount = octeon_deregister_device(oct);
1124
1125                /* Soft reset the octeon device before exiting.
1126                 * However, if fw was loaded from card (i.e. autoboot),
1127                 * perform an FLR instead.
1128                 * Implementation note: only soft-reset the device
1129                 * if it is a CN6XXX OR the LAST CN23XX device.
1130                 */
1131                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1132                        octeon_pci_flr(oct);
1133                else if (OCTEON_CN6XXX(oct) || !refcount)
1134                        oct->fn_list.soft_reset(oct);
1135
1136                octeon_unmap_pci_barx(oct, 0);
1137                octeon_unmap_pci_barx(oct, 1);
1138
1139                fallthrough;
1140        case OCT_DEV_PCI_ENABLE_DONE:
1141                pci_clear_master(oct->pci_dev);
1142                /* Disable the device, releasing the PCI INT */
1143                pci_disable_device(oct->pci_dev);
1144
1145                fallthrough;
1146        case OCT_DEV_BEGIN_STATE:
1147                /* Nothing to be done here either */
1148                break;
1149        }                       /* end switch (oct->status) */
1150
1151        tasklet_kill(&oct_priv->droq_tasklet);
1152}
1153
1154/**
1155 * \brief Send Rx control command
1156 * @param lio per-network private data
1157 * @param start_stop whether to start or stop
1158 */
1159static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1160{
1161        struct octeon_soft_command *sc;
1162        union octnet_cmd *ncmd;
1163        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1164        int retval;
1165
1166        if (oct->props[lio->ifidx].rx_on == start_stop)
1167                return;
1168
1169        sc = (struct octeon_soft_command *)
1170                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1171                                          16, 0);
1172        if (!sc) {
1173                netif_info(lio, rx_err, lio->netdev,
1174                           "Failed to allocate octeon_soft_command\n");
1175                return;
1176        }
1177
1178        ncmd = (union octnet_cmd *)sc->virtdptr;
1179
1180        ncmd->u64 = 0;
1181        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1182        ncmd->s.param1 = start_stop;
1183
1184        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1185
1186        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1187
1188        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1189                                    OPCODE_NIC_CMD, 0, 0, 0);
1190
1191        init_completion(&sc->complete);
1192        sc->sc_status = OCTEON_REQUEST_PENDING;
1193
1194        retval = octeon_send_soft_command(oct, sc);
1195        if (retval == IQ_SEND_FAILED) {
1196                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1197                octeon_free_soft_command(oct, sc);
1198                return;
1199        } else {
1200                /* Sleep on a wait queue till the cond flag indicates that the
1201                 * response arrived or timed-out.
1202                 */
1203                retval = wait_for_sc_completion_timeout(oct, sc, 0);
1204                if (retval)
1205                        return;
1206
1207                oct->props[lio->ifidx].rx_on = start_stop;
1208                WRITE_ONCE(sc->caller_is_done, true);
1209        }
1210}
1211
1212/**
1213 * \brief Destroy NIC device interface
1214 * @param oct octeon device
1215 * @param ifidx which interface to destroy
1216 *
1217 * Cleanup associated with each interface for an Octeon device  when NIC
1218 * module is being unloaded or if initialization fails during load.
1219 */
1220static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1221{
1222        struct net_device *netdev = oct->props[ifidx].netdev;
1223        struct octeon_device_priv *oct_priv =
1224                (struct octeon_device_priv *)oct->priv;
1225        struct napi_struct *napi, *n;
1226        struct lio *lio;
1227
1228        if (!netdev) {
1229                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1230                        __func__, ifidx);
1231                return;
1232        }
1233
1234        lio = GET_LIO(netdev);
1235
1236        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1237
1238        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1239                liquidio_stop(netdev);
1240
1241        if (oct->props[lio->ifidx].napi_enabled == 1) {
1242                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1243                        napi_disable(napi);
1244
1245                oct->props[lio->ifidx].napi_enabled = 0;
1246
1247                if (OCTEON_CN23XX_PF(oct))
1248                        oct->droq[0]->ops.poll_mode = 0;
1249        }
1250
1251        /* Delete NAPI */
1252        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1253                netif_napi_del(napi);
1254
1255        tasklet_enable(&oct_priv->droq_tasklet);
1256
1257        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1258                unregister_netdev(netdev);
1259
1260        cleanup_sync_octeon_time_wq(netdev);
1261        cleanup_link_status_change_wq(netdev);
1262
1263        cleanup_rx_oom_poll_fn(netdev);
1264
1265        lio_delete_glists(lio);
1266
1267        free_netdev(netdev);
1268
1269        oct->props[ifidx].gmxport = -1;
1270
1271        oct->props[ifidx].netdev = NULL;
1272}
1273
1274/**
1275 * \brief Stop complete NIC functionality
1276 * @param oct octeon device
1277 */
1278static int liquidio_stop_nic_module(struct octeon_device *oct)
1279{
1280        int i, j;
1281        struct lio *lio;
1282
1283        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1284        if (!oct->ifcount) {
1285                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1286                return 1;
1287        }
1288
1289        spin_lock_bh(&oct->cmd_resp_wqlock);
1290        oct->cmd_resp_state = OCT_DRV_OFFLINE;
1291        spin_unlock_bh(&oct->cmd_resp_wqlock);
1292
1293        lio_vf_rep_destroy(oct);
1294
1295        for (i = 0; i < oct->ifcount; i++) {
1296                lio = GET_LIO(oct->props[i].netdev);
1297                for (j = 0; j < oct->num_oqs; j++)
1298                        octeon_unregister_droq_ops(oct,
1299                                                   lio->linfo.rxpciq[j].s.q_no);
1300        }
1301
1302        for (i = 0; i < oct->ifcount; i++)
1303                liquidio_destroy_nic_device(oct, i);
1304
1305        if (oct->devlink) {
1306                devlink_unregister(oct->devlink);
1307                devlink_free(oct->devlink);
1308                oct->devlink = NULL;
1309        }
1310
1311        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1312        return 0;
1313}
1314
1315/**
1316 * \brief Cleans up resources at unload time
1317 * @param pdev PCI device structure
1318 */
1319static void liquidio_remove(struct pci_dev *pdev)
1320{
1321        struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1322
1323        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1324
1325        if (oct_dev->watchdog_task)
1326                kthread_stop(oct_dev->watchdog_task);
1327
1328        if (!oct_dev->octeon_id &&
1329            oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1330                lio_vf_rep_modexit();
1331
1332        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1333                liquidio_stop_nic_module(oct_dev);
1334
1335        /* Reset the octeon device and cleanup all memory allocated for
1336         * the octeon device by driver.
1337         */
1338        octeon_destroy_resources(oct_dev);
1339
1340        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1341
1342        /* This octeon device has been removed. Update the global
1343         * data structure to reflect this. Free the device structure.
1344         */
1345        octeon_free_device_mem(oct_dev);
1346}
1347
1348/**
1349 * \brief Identify the Octeon device and to map the BAR address space
1350 * @param oct octeon device
1351 */
1352static int octeon_chip_specific_setup(struct octeon_device *oct)
1353{
1354        u32 dev_id, rev_id;
1355        int ret = 1;
1356
1357        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1358        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1359        oct->rev_id = rev_id & 0xff;
1360
1361        switch (dev_id) {
1362        case OCTEON_CN68XX_PCIID:
1363                oct->chip_id = OCTEON_CN68XX;
1364                ret = lio_setup_cn68xx_octeon_device(oct);
1365                break;
1366
1367        case OCTEON_CN66XX_PCIID:
1368                oct->chip_id = OCTEON_CN66XX;
1369                ret = lio_setup_cn66xx_octeon_device(oct);
1370                break;
1371
1372        case OCTEON_CN23XX_PCIID_PF:
1373                oct->chip_id = OCTEON_CN23XX_PF_VID;
1374                ret = setup_cn23xx_octeon_pf_device(oct);
1375                if (ret)
1376                        break;
1377#ifdef CONFIG_PCI_IOV
1378                if (!ret)
1379                        pci_sriov_set_totalvfs(oct->pci_dev,
1380                                               oct->sriov_info.max_vfs);
1381#endif
1382                break;
1383
1384        default:
1385                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1386                        dev_id);
1387        }
1388
1389        return ret;
1390}
1391
1392/**
1393 * \brief PCI initialization for each Octeon device.
1394 * @param oct octeon device
1395 */
1396static int octeon_pci_os_setup(struct octeon_device *oct)
1397{
1398        /* setup PCI stuff first */
1399        if (pci_enable_device(oct->pci_dev)) {
1400                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1401                return 1;
1402        }
1403
1404        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1405                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1406                pci_disable_device(oct->pci_dev);
1407                return 1;
1408        }
1409
1410        /* Enable PCI DMA Master. */
1411        pci_set_master(oct->pci_dev);
1412
1413        return 0;
1414}
1415
1416/**
1417 * \brief Unmap and free network buffer
1418 * @param buf buffer
1419 */
1420static void free_netbuf(void *buf)
1421{
1422        struct sk_buff *skb;
1423        struct octnet_buf_free_info *finfo;
1424        struct lio *lio;
1425
1426        finfo = (struct octnet_buf_free_info *)buf;
1427        skb = finfo->skb;
1428        lio = finfo->lio;
1429
1430        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1431                         DMA_TO_DEVICE);
1432
1433        tx_buffer_free(skb);
1434}
1435
1436/**
1437 * \brief Unmap and free gather buffer
1438 * @param buf buffer
1439 */
1440static void free_netsgbuf(void *buf)
1441{
1442        struct octnet_buf_free_info *finfo;
1443        struct sk_buff *skb;
1444        struct lio *lio;
1445        struct octnic_gather *g;
1446        int i, frags, iq;
1447
1448        finfo = (struct octnet_buf_free_info *)buf;
1449        skb = finfo->skb;
1450        lio = finfo->lio;
1451        g = finfo->g;
1452        frags = skb_shinfo(skb)->nr_frags;
1453
1454        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1455                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1456                         DMA_TO_DEVICE);
1457
1458        i = 1;
1459        while (frags--) {
1460                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1461
1462                pci_unmap_page((lio->oct_dev)->pci_dev,
1463                               g->sg[(i >> 2)].ptr[(i & 3)],
1464                               skb_frag_size(frag), DMA_TO_DEVICE);
1465                i++;
1466        }
1467
1468        iq = skb_iq(lio->oct_dev, skb);
1469        spin_lock(&lio->glist_lock[iq]);
1470        list_add_tail(&g->list, &lio->glist[iq]);
1471        spin_unlock(&lio->glist_lock[iq]);
1472
1473        tx_buffer_free(skb);
1474}
1475
1476/**
1477 * \brief Unmap and free gather buffer with response
1478 * @param buf buffer
1479 */
1480static void free_netsgbuf_with_resp(void *buf)
1481{
1482        struct octeon_soft_command *sc;
1483        struct octnet_buf_free_info *finfo;
1484        struct sk_buff *skb;
1485        struct lio *lio;
1486        struct octnic_gather *g;
1487        int i, frags, iq;
1488
1489        sc = (struct octeon_soft_command *)buf;
1490        skb = (struct sk_buff *)sc->callback_arg;
1491        finfo = (struct octnet_buf_free_info *)&skb->cb;
1492
1493        lio = finfo->lio;
1494        g = finfo->g;
1495        frags = skb_shinfo(skb)->nr_frags;
1496
1497        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1498                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1499                         DMA_TO_DEVICE);
1500
1501        i = 1;
1502        while (frags--) {
1503                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
1504
1505                pci_unmap_page((lio->oct_dev)->pci_dev,
1506                               g->sg[(i >> 2)].ptr[(i & 3)],
1507                               skb_frag_size(frag), DMA_TO_DEVICE);
1508                i++;
1509        }
1510
1511        iq = skb_iq(lio->oct_dev, skb);
1512
1513        spin_lock(&lio->glist_lock[iq]);
1514        list_add_tail(&g->list, &lio->glist[iq]);
1515        spin_unlock(&lio->glist_lock[iq]);
1516
1517        /* Don't free the skb yet */
1518}
1519
1520/**
1521 * \brief Adjust ptp frequency
1522 * @param ptp PTP clock info
1523 * @param ppb how much to adjust by, in parts-per-billion
1524 */
1525static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1526{
1527        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1528        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1529        u64 comp, delta;
1530        unsigned long flags;
1531        bool neg_adj = false;
1532
1533        if (ppb < 0) {
1534                neg_adj = true;
1535                ppb = -ppb;
1536        }
1537
1538        /* The hardware adds the clock compensation value to the
1539         * PTP clock on every coprocessor clock cycle, so we
1540         * compute the delta in terms of coprocessor clocks.
1541         */
1542        delta = (u64)ppb << 32;
1543        do_div(delta, oct->coproc_clock_rate);
1544
1545        spin_lock_irqsave(&lio->ptp_lock, flags);
1546        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1547        if (neg_adj)
1548                comp -= delta;
1549        else
1550                comp += delta;
1551        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1552        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1553
1554        return 0;
1555}
1556
1557/**
1558 * \brief Adjust ptp time
1559 * @param ptp PTP clock info
1560 * @param delta how much to adjust by, in nanosecs
1561 */
1562static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1563{
1564        unsigned long flags;
1565        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1566
1567        spin_lock_irqsave(&lio->ptp_lock, flags);
1568        lio->ptp_adjust += delta;
1569        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1570
1571        return 0;
1572}
1573
1574/**
1575 * \brief Get hardware clock time, including any adjustment
1576 * @param ptp PTP clock info
1577 * @param ts timespec
1578 */
1579static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1580                                struct timespec64 *ts)
1581{
1582        u64 ns;
1583        unsigned long flags;
1584        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1585        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1586
1587        spin_lock_irqsave(&lio->ptp_lock, flags);
1588        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1589        ns += lio->ptp_adjust;
1590        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1591
1592        *ts = ns_to_timespec64(ns);
1593
1594        return 0;
1595}
1596
1597/**
1598 * \brief Set hardware clock time. Reset adjustment
1599 * @param ptp PTP clock info
1600 * @param ts timespec
1601 */
1602static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1603                                const struct timespec64 *ts)
1604{
1605        u64 ns;
1606        unsigned long flags;
1607        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1608        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1609
1610        ns = timespec64_to_ns(ts);
1611
1612        spin_lock_irqsave(&lio->ptp_lock, flags);
1613        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1614        lio->ptp_adjust = 0;
1615        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1616
1617        return 0;
1618}
1619
1620/**
1621 * \brief Check if PTP is enabled
1622 * @param ptp PTP clock info
1623 * @param rq request
1624 * @param on is it on
1625 */
1626static int
1627liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1628                    struct ptp_clock_request *rq __attribute__((unused)),
1629                    int on __attribute__((unused)))
1630{
1631        return -EOPNOTSUPP;
1632}
1633
1634/**
1635 * \brief Open PTP clock source
1636 * @param netdev network device
1637 */
1638static void oct_ptp_open(struct net_device *netdev)
1639{
1640        struct lio *lio = GET_LIO(netdev);
1641        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1642
1643        spin_lock_init(&lio->ptp_lock);
1644
1645        snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1646        lio->ptp_info.owner = THIS_MODULE;
1647        lio->ptp_info.max_adj = 250000000;
1648        lio->ptp_info.n_alarm = 0;
1649        lio->ptp_info.n_ext_ts = 0;
1650        lio->ptp_info.n_per_out = 0;
1651        lio->ptp_info.pps = 0;
1652        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1653        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1654        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1655        lio->ptp_info.settime64 = liquidio_ptp_settime;
1656        lio->ptp_info.enable = liquidio_ptp_enable;
1657
1658        lio->ptp_adjust = 0;
1659
1660        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1661                                             &oct->pci_dev->dev);
1662
1663        if (IS_ERR(lio->ptp_clock))
1664                lio->ptp_clock = NULL;
1665}
1666
1667/**
1668 * \brief Init PTP clock
1669 * @param oct octeon device
1670 */
1671static void liquidio_ptp_init(struct octeon_device *oct)
1672{
1673        u64 clock_comp, cfg;
1674
1675        clock_comp = (u64)NSEC_PER_SEC << 32;
1676        do_div(clock_comp, oct->coproc_clock_rate);
1677        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1678
1679        /* Enable */
1680        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1681        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1682}
1683
1684/**
1685 * \brief Load firmware to device
1686 * @param oct octeon device
1687 *
1688 * Maps device to firmware filename, requests firmware, and downloads it
1689 */
1690static int load_firmware(struct octeon_device *oct)
1691{
1692        int ret = 0;
1693        const struct firmware *fw;
1694        char fw_name[LIO_MAX_FW_FILENAME_LEN];
1695        char *tmp_fw_type;
1696
1697        if (fw_type_is_auto()) {
1698                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1699                strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1700        } else {
1701                tmp_fw_type = fw_type;
1702        }
1703
1704        sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1705                octeon_get_conf(oct)->card_name, tmp_fw_type,
1706                LIO_FW_NAME_SUFFIX);
1707
1708        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1709        if (ret) {
1710                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1711                        fw_name);
1712                release_firmware(fw);
1713                return ret;
1714        }
1715
1716        ret = octeon_download_firmware(oct, fw->data, fw->size);
1717
1718        release_firmware(fw);
1719
1720        return ret;
1721}
1722
1723/**
1724 * \brief Poll routine for checking transmit queue status
1725 * @param work work_struct data structure
1726 */
1727static void octnet_poll_check_txq_status(struct work_struct *work)
1728{
1729        struct cavium_wk *wk = (struct cavium_wk *)work;
1730        struct lio *lio = (struct lio *)wk->ctxptr;
1731
1732        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1733                return;
1734
1735        check_txq_status(lio);
1736        queue_delayed_work(lio->txq_status_wq.wq,
1737                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1738}
1739
1740/**
1741 * \brief Sets up the txq poll check
1742 * @param netdev network device
1743 */
1744static inline int setup_tx_poll_fn(struct net_device *netdev)
1745{
1746        struct lio *lio = GET_LIO(netdev);
1747        struct octeon_device *oct = lio->oct_dev;
1748
1749        lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1750                                                WQ_MEM_RECLAIM, 0);
1751        if (!lio->txq_status_wq.wq) {
1752                dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1753                return -1;
1754        }
1755        INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1756                          octnet_poll_check_txq_status);
1757        lio->txq_status_wq.wk.ctxptr = lio;
1758        queue_delayed_work(lio->txq_status_wq.wq,
1759                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1760        return 0;
1761}
1762
1763static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1764{
1765        struct lio *lio = GET_LIO(netdev);
1766
1767        if (lio->txq_status_wq.wq) {
1768                cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1769                destroy_workqueue(lio->txq_status_wq.wq);
1770        }
1771}
1772
1773/**
1774 * \brief Net device open for LiquidIO
1775 * @param netdev network device
1776 */
1777static int liquidio_open(struct net_device *netdev)
1778{
1779        struct lio *lio = GET_LIO(netdev);
1780        struct octeon_device *oct = lio->oct_dev;
1781        struct octeon_device_priv *oct_priv =
1782                (struct octeon_device_priv *)oct->priv;
1783        struct napi_struct *napi, *n;
1784
1785        if (oct->props[lio->ifidx].napi_enabled == 0) {
1786                tasklet_disable(&oct_priv->droq_tasklet);
1787
1788                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1789                        napi_enable(napi);
1790
1791                oct->props[lio->ifidx].napi_enabled = 1;
1792
1793                if (OCTEON_CN23XX_PF(oct))
1794                        oct->droq[0]->ops.poll_mode = 1;
1795        }
1796
1797        if (oct->ptp_enable)
1798                oct_ptp_open(netdev);
1799
1800        ifstate_set(lio, LIO_IFSTATE_RUNNING);
1801
1802        if (OCTEON_CN23XX_PF(oct)) {
1803                if (!oct->msix_on)
1804                        if (setup_tx_poll_fn(netdev))
1805                                return -1;
1806        } else {
1807                if (setup_tx_poll_fn(netdev))
1808                        return -1;
1809        }
1810
1811        netif_tx_start_all_queues(netdev);
1812
1813        /* Ready for link status updates */
1814        lio->intf_open = 1;
1815
1816        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1817
1818        /* tell Octeon to start forwarding packets to host */
1819        send_rx_ctrl_cmd(lio, 1);
1820
1821        /* start periodical statistics fetch */
1822        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1823        lio->stats_wk.ctxptr = lio;
1824        schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1825                                        (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1826
1827        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1828                 netdev->name);
1829
1830        return 0;
1831}
1832
1833/**
1834 * \brief Net device stop for LiquidIO
1835 * @param netdev network device
1836 */
1837static int liquidio_stop(struct net_device *netdev)
1838{
1839        struct lio *lio = GET_LIO(netdev);
1840        struct octeon_device *oct = lio->oct_dev;
1841        struct octeon_device_priv *oct_priv =
1842                (struct octeon_device_priv *)oct->priv;
1843        struct napi_struct *napi, *n;
1844
1845        ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1846
1847        /* Stop any link updates */
1848        lio->intf_open = 0;
1849
1850        stop_txqs(netdev);
1851
1852        /* Inform that netif carrier is down */
1853        netif_carrier_off(netdev);
1854        netif_tx_disable(netdev);
1855
1856        lio->linfo.link.s.link_up = 0;
1857        lio->link_changes++;
1858
1859        /* Tell Octeon that nic interface is down. */
1860        send_rx_ctrl_cmd(lio, 0);
1861
1862        if (OCTEON_CN23XX_PF(oct)) {
1863                if (!oct->msix_on)
1864                        cleanup_tx_poll_fn(netdev);
1865        } else {
1866                cleanup_tx_poll_fn(netdev);
1867        }
1868
1869        cancel_delayed_work_sync(&lio->stats_wk.work);
1870
1871        if (lio->ptp_clock) {
1872                ptp_clock_unregister(lio->ptp_clock);
1873                lio->ptp_clock = NULL;
1874        }
1875
1876        /* Wait for any pending Rx descriptors */
1877        if (lio_wait_for_clean_oq(oct))
1878                netif_info(lio, rx_err, lio->netdev,
1879                           "Proceeding with stop interface after partial RX desc processing\n");
1880
1881        if (oct->props[lio->ifidx].napi_enabled == 1) {
1882                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1883                        napi_disable(napi);
1884
1885                oct->props[lio->ifidx].napi_enabled = 0;
1886
1887                if (OCTEON_CN23XX_PF(oct))
1888                        oct->droq[0]->ops.poll_mode = 0;
1889
1890                tasklet_enable(&oct_priv->droq_tasklet);
1891        }
1892
1893        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1894
1895        return 0;
1896}
1897
1898/**
1899 * \brief Converts a mask based on net device flags
1900 * @param netdev network device
1901 *
1902 * This routine generates a octnet_ifflags mask from the net device flags
1903 * received from the OS.
1904 */
1905static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1906{
1907        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1908
1909        if (netdev->flags & IFF_PROMISC)
1910                f |= OCTNET_IFFLAG_PROMISC;
1911
1912        if (netdev->flags & IFF_ALLMULTI)
1913                f |= OCTNET_IFFLAG_ALLMULTI;
1914
1915        if (netdev->flags & IFF_MULTICAST) {
1916                f |= OCTNET_IFFLAG_MULTICAST;
1917
1918                /* Accept all multicast addresses if there are more than we
1919                 * can handle
1920                 */
1921                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1922                        f |= OCTNET_IFFLAG_ALLMULTI;
1923        }
1924
1925        if (netdev->flags & IFF_BROADCAST)
1926                f |= OCTNET_IFFLAG_BROADCAST;
1927
1928        return f;
1929}
1930
1931/**
1932 * \brief Net device set_multicast_list
1933 * @param netdev network device
1934 */
1935static void liquidio_set_mcast_list(struct net_device *netdev)
1936{
1937        struct lio *lio = GET_LIO(netdev);
1938        struct octeon_device *oct = lio->oct_dev;
1939        struct octnic_ctrl_pkt nctrl;
1940        struct netdev_hw_addr *ha;
1941        u64 *mc;
1942        int ret;
1943        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1944
1945        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1946
1947        /* Create a ctrl pkt command to be sent to core app. */
1948        nctrl.ncmd.u64 = 0;
1949        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1950        nctrl.ncmd.s.param1 = get_new_flags(netdev);
1951        nctrl.ncmd.s.param2 = mc_count;
1952        nctrl.ncmd.s.more = mc_count;
1953        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1954        nctrl.netpndev = (u64)netdev;
1955        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1956
1957        /* copy all the addresses into the udd */
1958        mc = &nctrl.udd[0];
1959        netdev_for_each_mc_addr(ha, netdev) {
1960                *mc = 0;
1961                memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1962                /* no need to swap bytes */
1963
1964                if (++mc > &nctrl.udd[mc_count])
1965                        break;
1966        }
1967
1968        /* Apparently, any activity in this call from the kernel has to
1969         * be atomic. So we won't wait for response.
1970         */
1971
1972        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1973        if (ret) {
1974                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1975                        ret);
1976        }
1977}
1978
1979/**
1980 * \brief Net device set_mac_address
1981 * @param netdev network device
1982 */
1983static int liquidio_set_mac(struct net_device *netdev, void *p)
1984{
1985        int ret = 0;
1986        struct lio *lio = GET_LIO(netdev);
1987        struct octeon_device *oct = lio->oct_dev;
1988        struct sockaddr *addr = (struct sockaddr *)p;
1989        struct octnic_ctrl_pkt nctrl;
1990
1991        if (!is_valid_ether_addr(addr->sa_data))
1992                return -EADDRNOTAVAIL;
1993
1994        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1995
1996        nctrl.ncmd.u64 = 0;
1997        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1998        nctrl.ncmd.s.param1 = 0;
1999        nctrl.ncmd.s.more = 1;
2000        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2001        nctrl.netpndev = (u64)netdev;
2002
2003        nctrl.udd[0] = 0;
2004        /* The MAC Address is presented in network byte order. */
2005        memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2006
2007        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2008        if (ret < 0) {
2009                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2010                return -ENOMEM;
2011        }
2012
2013        if (nctrl.sc_status) {
2014                dev_err(&oct->pci_dev->dev,
2015                        "%s: MAC Address change failed. sc return=%x\n",
2016                         __func__, nctrl.sc_status);
2017                return -EIO;
2018        }
2019
2020        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2021        memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2022
2023        return 0;
2024}
2025
2026static void
2027liquidio_get_stats64(struct net_device *netdev,
2028                     struct rtnl_link_stats64 *lstats)
2029{
2030        struct lio *lio = GET_LIO(netdev);
2031        struct octeon_device *oct;
2032        u64 pkts = 0, drop = 0, bytes = 0;
2033        struct oct_droq_stats *oq_stats;
2034        struct oct_iq_stats *iq_stats;
2035        int i, iq_no, oq_no;
2036
2037        oct = lio->oct_dev;
2038
2039        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2040                return;
2041
2042        for (i = 0; i < oct->num_iqs; i++) {
2043                iq_no = lio->linfo.txpciq[i].s.q_no;
2044                iq_stats = &oct->instr_queue[iq_no]->stats;
2045                pkts += iq_stats->tx_done;
2046                drop += iq_stats->tx_dropped;
2047                bytes += iq_stats->tx_tot_bytes;
2048        }
2049
2050        lstats->tx_packets = pkts;
2051        lstats->tx_bytes = bytes;
2052        lstats->tx_dropped = drop;
2053
2054        pkts = 0;
2055        drop = 0;
2056        bytes = 0;
2057
2058        for (i = 0; i < oct->num_oqs; i++) {
2059                oq_no = lio->linfo.rxpciq[i].s.q_no;
2060                oq_stats = &oct->droq[oq_no]->stats;
2061                pkts += oq_stats->rx_pkts_received;
2062                drop += (oq_stats->rx_dropped +
2063                         oq_stats->dropped_nodispatch +
2064                         oq_stats->dropped_toomany +
2065                         oq_stats->dropped_nomem);
2066                bytes += oq_stats->rx_bytes_received;
2067        }
2068
2069        lstats->rx_bytes = bytes;
2070        lstats->rx_packets = pkts;
2071        lstats->rx_dropped = drop;
2072
2073        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2074        lstats->collisions = oct->link_stats.fromhost.total_collisions;
2075
2076        /* detailed rx_errors: */
2077        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2078        /* recved pkt with crc error    */
2079        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2080        /* recv'd frame alignment error */
2081        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2082        /* recv'r fifo overrun */
2083        lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2084
2085        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2086                lstats->rx_frame_errors + lstats->rx_fifo_errors;
2087
2088        /* detailed tx_errors */
2089        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2090        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2091        lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2092
2093        lstats->tx_errors = lstats->tx_aborted_errors +
2094                lstats->tx_carrier_errors +
2095                lstats->tx_fifo_errors;
2096}
2097
2098/**
2099 * \brief Handler for SIOCSHWTSTAMP ioctl
2100 * @param netdev network device
2101 * @param ifr interface request
2102 * @param cmd command
2103 */
2104static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2105{
2106        struct hwtstamp_config conf;
2107        struct lio *lio = GET_LIO(netdev);
2108
2109        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2110                return -EFAULT;
2111
2112        if (conf.flags)
2113                return -EINVAL;
2114
2115        switch (conf.tx_type) {
2116        case HWTSTAMP_TX_ON:
2117        case HWTSTAMP_TX_OFF:
2118                break;
2119        default:
2120                return -ERANGE;
2121        }
2122
2123        switch (conf.rx_filter) {
2124        case HWTSTAMP_FILTER_NONE:
2125                break;
2126        case HWTSTAMP_FILTER_ALL:
2127        case HWTSTAMP_FILTER_SOME:
2128        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2129        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2130        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2131        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2132        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2133        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2134        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2135        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2136        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2137        case HWTSTAMP_FILTER_PTP_V2_EVENT:
2138        case HWTSTAMP_FILTER_PTP_V2_SYNC:
2139        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2140        case HWTSTAMP_FILTER_NTP_ALL:
2141                conf.rx_filter = HWTSTAMP_FILTER_ALL;
2142                break;
2143        default:
2144                return -ERANGE;
2145        }
2146
2147        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2148                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2149
2150        else
2151                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2152
2153        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2154}
2155
2156/**
2157 * \brief ioctl handler
2158 * @param netdev network device
2159 * @param ifr interface request
2160 * @param cmd command
2161 */
2162static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2163{
2164        struct lio *lio = GET_LIO(netdev);
2165
2166        switch (cmd) {
2167        case SIOCSHWTSTAMP:
2168                if (lio->oct_dev->ptp_enable)
2169                        return hwtstamp_ioctl(netdev, ifr);
2170                fallthrough;
2171        default:
2172                return -EOPNOTSUPP;
2173        }
2174}
2175
2176/**
2177 * \brief handle a Tx timestamp response
2178 * @param status response status
2179 * @param buf pointer to skb
2180 */
2181static void handle_timestamp(struct octeon_device *oct,
2182                             u32 status,
2183                             void *buf)
2184{
2185        struct octnet_buf_free_info *finfo;
2186        struct octeon_soft_command *sc;
2187        struct oct_timestamp_resp *resp;
2188        struct lio *lio;
2189        struct sk_buff *skb = (struct sk_buff *)buf;
2190
2191        finfo = (struct octnet_buf_free_info *)skb->cb;
2192        lio = finfo->lio;
2193        sc = finfo->sc;
2194        oct = lio->oct_dev;
2195        resp = (struct oct_timestamp_resp *)sc->virtrptr;
2196
2197        if (status != OCTEON_REQUEST_DONE) {
2198                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2199                        CVM_CAST64(status));
2200                resp->timestamp = 0;
2201        }
2202
2203        octeon_swap_8B_data(&resp->timestamp, 1);
2204
2205        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2206                struct skb_shared_hwtstamps ts;
2207                u64 ns = resp->timestamp;
2208
2209                netif_info(lio, tx_done, lio->netdev,
2210                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2211                           skb, (unsigned long long)ns);
2212                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2213                skb_tstamp_tx(skb, &ts);
2214        }
2215
2216        octeon_free_soft_command(oct, sc);
2217        tx_buffer_free(skb);
2218}
2219
2220/* \brief Send a data packet that will be timestamped
2221 * @param oct octeon device
2222 * @param ndata pointer to network data
2223 * @param finfo pointer to private network data
2224 */
2225static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2226                                         struct octnic_data_pkt *ndata,
2227                                         struct octnet_buf_free_info *finfo,
2228                                         int xmit_more)
2229{
2230        int retval;
2231        struct octeon_soft_command *sc;
2232        struct lio *lio;
2233        int ring_doorbell;
2234        u32 len;
2235
2236        lio = finfo->lio;
2237
2238        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2239                                            sizeof(struct oct_timestamp_resp));
2240        finfo->sc = sc;
2241
2242        if (!sc) {
2243                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2244                return IQ_SEND_FAILED;
2245        }
2246
2247        if (ndata->reqtype == REQTYPE_NORESP_NET)
2248                ndata->reqtype = REQTYPE_RESP_NET;
2249        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2250                ndata->reqtype = REQTYPE_RESP_NET_SG;
2251
2252        sc->callback = handle_timestamp;
2253        sc->callback_arg = finfo->skb;
2254        sc->iq_no = ndata->q_no;
2255
2256        if (OCTEON_CN23XX_PF(oct))
2257                len = (u32)((struct octeon_instr_ih3 *)
2258                            (&sc->cmd.cmd3.ih3))->dlengsz;
2259        else
2260                len = (u32)((struct octeon_instr_ih2 *)
2261                            (&sc->cmd.cmd2.ih2))->dlengsz;
2262
2263        ring_doorbell = !xmit_more;
2264
2265        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2266                                     sc, len, ndata->reqtype);
2267
2268        if (retval == IQ_SEND_FAILED) {
2269                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2270                        retval);
2271                octeon_free_soft_command(oct, sc);
2272        } else {
2273                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2274        }
2275
2276        return retval;
2277}
2278
2279/** \brief Transmit networks packets to the Octeon interface
2280 * @param skbuff   skbuff struct to be passed to network layer.
2281 * @param netdev    pointer to network device
2282 * @returns whether the packet was transmitted to the device okay or not
2283 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2284 */
2285static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2286{
2287        struct lio *lio;
2288        struct octnet_buf_free_info *finfo;
2289        union octnic_cmd_setup cmdsetup;
2290        struct octnic_data_pkt ndata;
2291        struct octeon_device *oct;
2292        struct oct_iq_stats *stats;
2293        struct octeon_instr_irh *irh;
2294        union tx_info *tx_info;
2295        int status = 0;
2296        int q_idx = 0, iq_no = 0;
2297        int j, xmit_more = 0;
2298        u64 dptr = 0;
2299        u32 tag = 0;
2300
2301        lio = GET_LIO(netdev);
2302        oct = lio->oct_dev;
2303
2304        q_idx = skb_iq(oct, skb);
2305        tag = q_idx;
2306        iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2307
2308        stats = &oct->instr_queue[iq_no]->stats;
2309
2310        /* Check for all conditions in which the current packet cannot be
2311         * transmitted.
2312         */
2313        if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2314            (!lio->linfo.link.s.link_up) ||
2315            (skb->len <= 0)) {
2316                netif_info(lio, tx_err, lio->netdev,
2317                           "Transmit failed link_status : %d\n",
2318                           lio->linfo.link.s.link_up);
2319                goto lio_xmit_failed;
2320        }
2321
2322        /* Use space in skb->cb to store info used to unmap and
2323         * free the buffers.
2324         */
2325        finfo = (struct octnet_buf_free_info *)skb->cb;
2326        finfo->lio = lio;
2327        finfo->skb = skb;
2328        finfo->sc = NULL;
2329
2330        /* Prepare the attributes for the data to be passed to OSI. */
2331        memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2332
2333        ndata.buf = (void *)finfo;
2334
2335        ndata.q_no = iq_no;
2336
2337        if (octnet_iq_is_full(oct, ndata.q_no)) {
2338                /* defer sending if queue is full */
2339                netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2340                           ndata.q_no);
2341                stats->tx_iq_busy++;
2342                return NETDEV_TX_BUSY;
2343        }
2344
2345        /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2346         *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2347         */
2348
2349        ndata.datasize = skb->len;
2350
2351        cmdsetup.u64 = 0;
2352        cmdsetup.s.iq_no = iq_no;
2353
2354        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2355                if (skb->encapsulation) {
2356                        cmdsetup.s.tnl_csum = 1;
2357                        stats->tx_vxlan++;
2358                } else {
2359                        cmdsetup.s.transport_csum = 1;
2360                }
2361        }
2362        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2363                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2364                cmdsetup.s.timestamp = 1;
2365        }
2366
2367        if (skb_shinfo(skb)->nr_frags == 0) {
2368                cmdsetup.s.u.datasize = skb->len;
2369                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2370
2371                /* Offload checksum calculation for TCP/UDP packets */
2372                dptr = dma_map_single(&oct->pci_dev->dev,
2373                                      skb->data,
2374                                      skb->len,
2375                                      DMA_TO_DEVICE);
2376                if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2377                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2378                                __func__);
2379                        stats->tx_dmamap_fail++;
2380                        return NETDEV_TX_BUSY;
2381                }
2382
2383                if (OCTEON_CN23XX_PF(oct))
2384                        ndata.cmd.cmd3.dptr = dptr;
2385                else
2386                        ndata.cmd.cmd2.dptr = dptr;
2387                finfo->dptr = dptr;
2388                ndata.reqtype = REQTYPE_NORESP_NET;
2389
2390        } else {
2391                int i, frags;
2392                skb_frag_t *frag;
2393                struct octnic_gather *g;
2394
2395                spin_lock(&lio->glist_lock[q_idx]);
2396                g = (struct octnic_gather *)
2397                        lio_list_delete_head(&lio->glist[q_idx]);
2398                spin_unlock(&lio->glist_lock[q_idx]);
2399
2400                if (!g) {
2401                        netif_info(lio, tx_err, lio->netdev,
2402                                   "Transmit scatter gather: glist null!\n");
2403                        goto lio_xmit_failed;
2404                }
2405
2406                cmdsetup.s.gather = 1;
2407                cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2408                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2409
2410                memset(g->sg, 0, g->sg_size);
2411
2412                g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2413                                                 skb->data,
2414                                                 (skb->len - skb->data_len),
2415                                                 DMA_TO_DEVICE);
2416                if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2417                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2418                                __func__);
2419                        stats->tx_dmamap_fail++;
2420                        return NETDEV_TX_BUSY;
2421                }
2422                add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2423
2424                frags = skb_shinfo(skb)->nr_frags;
2425                i = 1;
2426                while (frags--) {
2427                        frag = &skb_shinfo(skb)->frags[i - 1];
2428
2429                        g->sg[(i >> 2)].ptr[(i & 3)] =
2430                                skb_frag_dma_map(&oct->pci_dev->dev,
2431                                                 frag, 0, skb_frag_size(frag),
2432                                                 DMA_TO_DEVICE);
2433
2434                        if (dma_mapping_error(&oct->pci_dev->dev,
2435                                              g->sg[i >> 2].ptr[i & 3])) {
2436                                dma_unmap_single(&oct->pci_dev->dev,
2437                                                 g->sg[0].ptr[0],
2438                                                 skb->len - skb->data_len,
2439                                                 DMA_TO_DEVICE);
2440                                for (j = 1; j < i; j++) {
2441                                        frag = &skb_shinfo(skb)->frags[j - 1];
2442                                        dma_unmap_page(&oct->pci_dev->dev,
2443                                                       g->sg[j >> 2].ptr[j & 3],
2444                                                       skb_frag_size(frag),
2445                                                       DMA_TO_DEVICE);
2446                                }
2447                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2448                                        __func__);
2449                                return NETDEV_TX_BUSY;
2450                        }
2451
2452                        add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2453                                    (i & 3));
2454                        i++;
2455                }
2456
2457                dptr = g->sg_dma_ptr;
2458
2459                if (OCTEON_CN23XX_PF(oct))
2460                        ndata.cmd.cmd3.dptr = dptr;
2461                else
2462                        ndata.cmd.cmd2.dptr = dptr;
2463                finfo->dptr = dptr;
2464                finfo->g = g;
2465
2466                ndata.reqtype = REQTYPE_NORESP_NET_SG;
2467        }
2468
2469        if (OCTEON_CN23XX_PF(oct)) {
2470                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2471                tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2472        } else {
2473                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2474                tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2475        }
2476
2477        if (skb_shinfo(skb)->gso_size) {
2478                tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2479                tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2480                stats->tx_gso++;
2481        }
2482
2483        /* HW insert VLAN tag */
2484        if (skb_vlan_tag_present(skb)) {
2485                irh->priority = skb_vlan_tag_get(skb) >> 13;
2486                irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2487        }
2488
2489        xmit_more = netdev_xmit_more();
2490
2491        if (unlikely(cmdsetup.s.timestamp))
2492                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2493        else
2494                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2495        if (status == IQ_SEND_FAILED)
2496                goto lio_xmit_failed;
2497
2498        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2499
2500        if (status == IQ_SEND_STOP)
2501                netif_stop_subqueue(netdev, q_idx);
2502
2503        netif_trans_update(netdev);
2504
2505        if (tx_info->s.gso_segs)
2506                stats->tx_done += tx_info->s.gso_segs;
2507        else
2508                stats->tx_done++;
2509        stats->tx_tot_bytes += ndata.datasize;
2510
2511        return NETDEV_TX_OK;
2512
2513lio_xmit_failed:
2514        stats->tx_dropped++;
2515        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2516                   iq_no, stats->tx_dropped);
2517        if (dptr)
2518                dma_unmap_single(&oct->pci_dev->dev, dptr,
2519                                 ndata.datasize, DMA_TO_DEVICE);
2520
2521        octeon_ring_doorbell_locked(oct, iq_no);
2522
2523        tx_buffer_free(skb);
2524        return NETDEV_TX_OK;
2525}
2526
2527/** \brief Network device Tx timeout
2528 * @param netdev    pointer to network device
2529 */
2530static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2531{
2532        struct lio *lio;
2533
2534        lio = GET_LIO(netdev);
2535
2536        netif_info(lio, tx_err, lio->netdev,
2537                   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2538                   netdev->stats.tx_dropped);
2539        netif_trans_update(netdev);
2540        wake_txqs(netdev);
2541}
2542
2543static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2544                                    __be16 proto __attribute__((unused)),
2545                                    u16 vid)
2546{
2547        struct lio *lio = GET_LIO(netdev);
2548        struct octeon_device *oct = lio->oct_dev;
2549        struct octnic_ctrl_pkt nctrl;
2550        int ret = 0;
2551
2552        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2553
2554        nctrl.ncmd.u64 = 0;
2555        nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2556        nctrl.ncmd.s.param1 = vid;
2557        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2558        nctrl.netpndev = (u64)netdev;
2559        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2560
2561        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2562        if (ret) {
2563                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2564                        ret);
2565                if (ret > 0)
2566                        ret = -EIO;
2567        }
2568
2569        return ret;
2570}
2571
2572static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2573                                     __be16 proto __attribute__((unused)),
2574                                     u16 vid)
2575{
2576        struct lio *lio = GET_LIO(netdev);
2577        struct octeon_device *oct = lio->oct_dev;
2578        struct octnic_ctrl_pkt nctrl;
2579        int ret = 0;
2580
2581        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2582
2583        nctrl.ncmd.u64 = 0;
2584        nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2585        nctrl.ncmd.s.param1 = vid;
2586        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2587        nctrl.netpndev = (u64)netdev;
2588        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2589
2590        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2591        if (ret) {
2592                dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2593                        ret);
2594                if (ret > 0)
2595                        ret = -EIO;
2596        }
2597        return ret;
2598}
2599
2600/** Sending command to enable/disable RX checksum offload
2601 * @param netdev                pointer to network device
2602 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2603 * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
2604 *                              OCTNET_CMD_RXCSUM_DISABLE
2605 * @returns                     SUCCESS or FAILURE
2606 */
2607static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2608                                       u8 rx_cmd)
2609{
2610        struct lio *lio = GET_LIO(netdev);
2611        struct octeon_device *oct = lio->oct_dev;
2612        struct octnic_ctrl_pkt nctrl;
2613        int ret = 0;
2614
2615        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2616
2617        nctrl.ncmd.u64 = 0;
2618        nctrl.ncmd.s.cmd = command;
2619        nctrl.ncmd.s.param1 = rx_cmd;
2620        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2621        nctrl.netpndev = (u64)netdev;
2622        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2623
2624        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2625        if (ret) {
2626                dev_err(&oct->pci_dev->dev,
2627                        "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2628                        ret);
2629                if (ret > 0)
2630                        ret = -EIO;
2631        }
2632        return ret;
2633}
2634
2635/** Sending command to add/delete VxLAN UDP port to firmware
2636 * @param netdev                pointer to network device
2637 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2638 * @param vxlan_port            VxLAN port to be added or deleted
2639 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2640 *                              OCTNET_CMD_VXLAN_PORT_DEL
2641 * @returns                     SUCCESS or FAILURE
2642 */
2643static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2644                                       u16 vxlan_port, u8 vxlan_cmd_bit)
2645{
2646        struct lio *lio = GET_LIO(netdev);
2647        struct octeon_device *oct = lio->oct_dev;
2648        struct octnic_ctrl_pkt nctrl;
2649        int ret = 0;
2650
2651        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2652
2653        nctrl.ncmd.u64 = 0;
2654        nctrl.ncmd.s.cmd = command;
2655        nctrl.ncmd.s.more = vxlan_cmd_bit;
2656        nctrl.ncmd.s.param1 = vxlan_port;
2657        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2658        nctrl.netpndev = (u64)netdev;
2659        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2660
2661        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2662        if (ret) {
2663                dev_err(&oct->pci_dev->dev,
2664                        "VxLAN port add/delete failed in core (ret:0x%x)\n",
2665                        ret);
2666                if (ret > 0)
2667                        ret = -EIO;
2668        }
2669        return ret;
2670}
2671
2672static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2673                                        unsigned int table, unsigned int entry,
2674                                        struct udp_tunnel_info *ti)
2675{
2676        return liquidio_vxlan_port_command(netdev,
2677                                           OCTNET_CMD_VXLAN_PORT_CONFIG,
2678                                           htons(ti->port),
2679                                           OCTNET_CMD_VXLAN_PORT_ADD);
2680}
2681
2682static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2683                                          unsigned int table,
2684                                          unsigned int entry,
2685                                          struct udp_tunnel_info *ti)
2686{
2687        return liquidio_vxlan_port_command(netdev,
2688                                           OCTNET_CMD_VXLAN_PORT_CONFIG,
2689                                           htons(ti->port),
2690                                           OCTNET_CMD_VXLAN_PORT_DEL);
2691}
2692
2693static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2694        .set_port       = liquidio_udp_tunnel_set_port,
2695        .unset_port     = liquidio_udp_tunnel_unset_port,
2696        .tables         = {
2697                { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2698        },
2699};
2700
2701/** \brief Net device fix features
2702 * @param netdev  pointer to network device
2703 * @param request features requested
2704 * @returns updated features list
2705 */
2706static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2707                                               netdev_features_t request)
2708{
2709        struct lio *lio = netdev_priv(netdev);
2710
2711        if ((request & NETIF_F_RXCSUM) &&
2712            !(lio->dev_capability & NETIF_F_RXCSUM))
2713                request &= ~NETIF_F_RXCSUM;
2714
2715        if ((request & NETIF_F_HW_CSUM) &&
2716            !(lio->dev_capability & NETIF_F_HW_CSUM))
2717                request &= ~NETIF_F_HW_CSUM;
2718
2719        if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2720                request &= ~NETIF_F_TSO;
2721
2722        if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2723                request &= ~NETIF_F_TSO6;
2724
2725        if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2726                request &= ~NETIF_F_LRO;
2727
2728        /*Disable LRO if RXCSUM is off */
2729        if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2730            (lio->dev_capability & NETIF_F_LRO))
2731                request &= ~NETIF_F_LRO;
2732
2733        if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2734            !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2735                request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2736
2737        return request;
2738}
2739
2740/** \brief Net device set features
2741 * @param netdev  pointer to network device
2742 * @param features features to enable/disable
2743 */
2744static int liquidio_set_features(struct net_device *netdev,
2745                                 netdev_features_t features)
2746{
2747        struct lio *lio = netdev_priv(netdev);
2748
2749        if ((features & NETIF_F_LRO) &&
2750            (lio->dev_capability & NETIF_F_LRO) &&
2751            !(netdev->features & NETIF_F_LRO))
2752                liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2753                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2754        else if (!(features & NETIF_F_LRO) &&
2755                 (lio->dev_capability & NETIF_F_LRO) &&
2756                 (netdev->features & NETIF_F_LRO))
2757                liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2758                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2759
2760        /* Sending command to firmware to enable/disable RX checksum
2761         * offload settings using ethtool
2762         */
2763        if (!(netdev->features & NETIF_F_RXCSUM) &&
2764            (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2765            (features & NETIF_F_RXCSUM))
2766                liquidio_set_rxcsum_command(netdev,
2767                                            OCTNET_CMD_TNL_RX_CSUM_CTL,
2768                                            OCTNET_CMD_RXCSUM_ENABLE);
2769        else if ((netdev->features & NETIF_F_RXCSUM) &&
2770                 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2771                 !(features & NETIF_F_RXCSUM))
2772                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2773                                            OCTNET_CMD_RXCSUM_DISABLE);
2774
2775        if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2776            (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2777            !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2778                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2779                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
2780        else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2781                 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2782                 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2783                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2784                                     OCTNET_CMD_VLAN_FILTER_DISABLE);
2785
2786        return 0;
2787}
2788
2789static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2790                                 u8 *mac, bool is_admin_assigned)
2791{
2792        struct lio *lio = GET_LIO(netdev);
2793        struct octeon_device *oct = lio->oct_dev;
2794        struct octnic_ctrl_pkt nctrl;
2795        int ret = 0;
2796
2797        if (!is_valid_ether_addr(mac))
2798                return -EINVAL;
2799
2800        if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2801                return -EINVAL;
2802
2803        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2804
2805        nctrl.ncmd.u64 = 0;
2806        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2807        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2808        nctrl.ncmd.s.param1 = vfidx + 1;
2809        nctrl.ncmd.s.more = 1;
2810        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2811        nctrl.netpndev = (u64)netdev;
2812        if (is_admin_assigned) {
2813                nctrl.ncmd.s.param2 = true;
2814                nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2815        }
2816
2817        nctrl.udd[0] = 0;
2818        /* The MAC Address is presented in network byte order. */
2819        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2820
2821        oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2822
2823        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2824        if (ret > 0)
2825                ret = -EIO;
2826
2827        return ret;
2828}
2829
2830static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2831{
2832        struct lio *lio = GET_LIO(netdev);
2833        struct octeon_device *oct = lio->oct_dev;
2834        int retval;
2835
2836        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2837                return -EINVAL;
2838
2839        retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2840        if (!retval)
2841                cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2842
2843        return retval;
2844}
2845
2846static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2847                                    bool enable)
2848{
2849        struct lio *lio = GET_LIO(netdev);
2850        struct octeon_device *oct = lio->oct_dev;
2851        struct octnic_ctrl_pkt nctrl;
2852        int retval;
2853
2854        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2855                netif_info(lio, drv, lio->netdev,
2856                           "firmware does not support spoofchk\n");
2857                return -EOPNOTSUPP;
2858        }
2859
2860        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2861                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2862                return -EINVAL;
2863        }
2864
2865        if (enable) {
2866                if (oct->sriov_info.vf_spoofchk[vfidx])
2867                        return 0;
2868        } else {
2869                /* Clear */
2870                if (!oct->sriov_info.vf_spoofchk[vfidx])
2871                        return 0;
2872        }
2873
2874        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2875        nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2876        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2877        nctrl.ncmd.s.param1 =
2878                vfidx + 1; /* vfidx is 0 based,
2879                            * but vf_num (param1) is 1 based
2880                            */
2881        nctrl.ncmd.s.param2 = enable;
2882        nctrl.ncmd.s.more = 0;
2883        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2884        nctrl.cb_fn = NULL;
2885
2886        retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2887
2888        if (retval) {
2889                netif_info(lio, drv, lio->netdev,
2890                           "Failed to set VF %d spoofchk %s\n", vfidx,
2891                        enable ? "on" : "off");
2892                return -1;
2893        }
2894
2895        oct->sriov_info.vf_spoofchk[vfidx] = enable;
2896        netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2897                   enable ? "on" : "off");
2898
2899        return 0;
2900}
2901
2902static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2903                                u16 vlan, u8 qos, __be16 vlan_proto)
2904{
2905        struct lio *lio = GET_LIO(netdev);
2906        struct octeon_device *oct = lio->oct_dev;
2907        struct octnic_ctrl_pkt nctrl;
2908        u16 vlantci;
2909        int ret = 0;
2910
2911        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2912                return -EINVAL;
2913
2914        if (vlan_proto != htons(ETH_P_8021Q))
2915                return -EPROTONOSUPPORT;
2916
2917        if (vlan >= VLAN_N_VID || qos > 7)
2918                return -EINVAL;
2919
2920        if (vlan)
2921                vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2922        else
2923                vlantci = 0;
2924
2925        if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2926                return 0;
2927
2928        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2929
2930        if (vlan)
2931                nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2932        else
2933                nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2934
2935        nctrl.ncmd.s.param1 = vlantci;
2936        nctrl.ncmd.s.param2 =
2937            vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2938        nctrl.ncmd.s.more = 0;
2939        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2940        nctrl.cb_fn = NULL;
2941
2942        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2943        if (ret) {
2944                if (ret > 0)
2945                        ret = -EIO;
2946                return ret;
2947        }
2948
2949        oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2950
2951        return ret;
2952}
2953
2954static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2955                                  struct ifla_vf_info *ivi)
2956{
2957        struct lio *lio = GET_LIO(netdev);
2958        struct octeon_device *oct = lio->oct_dev;
2959        u8 *macaddr;
2960
2961        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2962                return -EINVAL;
2963
2964        memset(ivi, 0, sizeof(struct ifla_vf_info));
2965
2966        ivi->vf = vfidx;
2967        macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2968        ether_addr_copy(&ivi->mac[0], macaddr);
2969        ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2970        ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2971        if (oct->sriov_info.trusted_vf.active &&
2972            oct->sriov_info.trusted_vf.id == vfidx)
2973                ivi->trusted = true;
2974        else
2975                ivi->trusted = false;
2976        ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2977        ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2978        ivi->max_tx_rate = lio->linfo.link.s.speed;
2979        ivi->min_tx_rate = 0;
2980
2981        return 0;
2982}
2983
2984static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2985{
2986        struct octeon_device *oct = lio->oct_dev;
2987        struct octeon_soft_command *sc;
2988        int retval;
2989
2990        sc = octeon_alloc_soft_command(oct, 0, 16, 0);
2991        if (!sc)
2992                return -ENOMEM;
2993
2994        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2995
2996        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2997        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
2998                                    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
2999                                    trusted);
3000
3001        init_completion(&sc->complete);
3002        sc->sc_status = OCTEON_REQUEST_PENDING;
3003
3004        retval = octeon_send_soft_command(oct, sc);
3005        if (retval == IQ_SEND_FAILED) {
3006                octeon_free_soft_command(oct, sc);
3007                retval = -1;
3008        } else {
3009                /* Wait for response or timeout */
3010                retval = wait_for_sc_completion_timeout(oct, sc, 0);
3011                if (retval)
3012                        return (retval);
3013
3014                WRITE_ONCE(sc->caller_is_done, true);
3015        }
3016
3017        return retval;
3018}
3019
3020static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3021                                 bool setting)
3022{
3023        struct lio *lio = GET_LIO(netdev);
3024        struct octeon_device *oct = lio->oct_dev;
3025
3026        if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3027                /* trusted vf is not supported by firmware older than 1.7.1 */
3028                return -EOPNOTSUPP;
3029        }
3030
3031        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3032                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3033                return -EINVAL;
3034        }
3035
3036        if (setting) {
3037                /* Set */
3038
3039                if (oct->sriov_info.trusted_vf.active &&
3040                    oct->sriov_info.trusted_vf.id == vfidx)
3041                        return 0;
3042
3043                if (oct->sriov_info.trusted_vf.active) {
3044                        netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3045                        return -EPERM;
3046                }
3047        } else {
3048                /* Clear */
3049
3050                if (!oct->sriov_info.trusted_vf.active)
3051                        return 0;
3052        }
3053
3054        if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3055                if (setting) {
3056                        oct->sriov_info.trusted_vf.id = vfidx;
3057                        oct->sriov_info.trusted_vf.active = true;
3058                } else {
3059                        oct->sriov_info.trusted_vf.active = false;
3060                }
3061
3062                netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3063                           setting ? "" : "not ");
3064        } else {
3065                netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3066                return -1;
3067        }
3068
3069        return 0;
3070}
3071
3072static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3073                                      int linkstate)
3074{
3075        struct lio *lio = GET_LIO(netdev);
3076        struct octeon_device *oct = lio->oct_dev;
3077        struct octnic_ctrl_pkt nctrl;
3078        int ret = 0;
3079
3080        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3081                return -EINVAL;
3082
3083        if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3084                return 0;
3085
3086        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3087        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3088        nctrl.ncmd.s.param1 =
3089            vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3090        nctrl.ncmd.s.param2 = linkstate;
3091        nctrl.ncmd.s.more = 0;
3092        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3093        nctrl.cb_fn = NULL;
3094
3095        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3096
3097        if (!ret)
3098                oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3099        else if (ret > 0)
3100                ret = -EIO;
3101
3102        return ret;
3103}
3104
3105static int
3106liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3107{
3108        struct lio_devlink_priv *priv;
3109        struct octeon_device *oct;
3110
3111        priv = devlink_priv(devlink);
3112        oct = priv->oct;
3113
3114        *mode = oct->eswitch_mode;
3115
3116        return 0;
3117}
3118
3119static int
3120liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3121                          struct netlink_ext_ack *extack)
3122{
3123        struct lio_devlink_priv *priv;
3124        struct octeon_device *oct;
3125        int ret = 0;
3126
3127        priv = devlink_priv(devlink);
3128        oct = priv->oct;
3129
3130        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3131                return -EINVAL;
3132
3133        if (oct->eswitch_mode == mode)
3134                return 0;
3135
3136        switch (mode) {
3137        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3138                oct->eswitch_mode = mode;
3139                ret = lio_vf_rep_create(oct);
3140                break;
3141
3142        case DEVLINK_ESWITCH_MODE_LEGACY:
3143                lio_vf_rep_destroy(oct);
3144                oct->eswitch_mode = mode;
3145                break;
3146
3147        default:
3148                ret = -EINVAL;
3149        }
3150
3151        return ret;
3152}
3153
3154static const struct devlink_ops liquidio_devlink_ops = {
3155        .eswitch_mode_get = liquidio_eswitch_mode_get,
3156        .eswitch_mode_set = liquidio_eswitch_mode_set,
3157};
3158
3159static int
3160liquidio_get_port_parent_id(struct net_device *dev,
3161                            struct netdev_phys_item_id *ppid)
3162{
3163        struct lio *lio = GET_LIO(dev);
3164        struct octeon_device *oct = lio->oct_dev;
3165
3166        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3167                return -EOPNOTSUPP;
3168
3169        ppid->id_len = ETH_ALEN;
3170        ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3171
3172        return 0;
3173}
3174
3175static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3176                                 struct ifla_vf_stats *vf_stats)
3177{
3178        struct lio *lio = GET_LIO(netdev);
3179        struct octeon_device *oct = lio->oct_dev;
3180        struct oct_vf_stats stats;
3181        int ret;
3182
3183        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3184                return -EINVAL;
3185
3186        memset(&stats, 0, sizeof(struct oct_vf_stats));
3187        ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3188        if (!ret) {
3189                vf_stats->rx_packets = stats.rx_packets;
3190                vf_stats->tx_packets = stats.tx_packets;
3191                vf_stats->rx_bytes = stats.rx_bytes;
3192                vf_stats->tx_bytes = stats.tx_bytes;
3193                vf_stats->broadcast = stats.broadcast;
3194                vf_stats->multicast = stats.multicast;
3195        }
3196
3197        return ret;
3198}
3199
3200static const struct net_device_ops lionetdevops = {
3201        .ndo_open               = liquidio_open,
3202        .ndo_stop               = liquidio_stop,
3203        .ndo_start_xmit         = liquidio_xmit,
3204        .ndo_get_stats64        = liquidio_get_stats64,
3205        .ndo_set_mac_address    = liquidio_set_mac,
3206        .ndo_set_rx_mode        = liquidio_set_mcast_list,
3207        .ndo_tx_timeout         = liquidio_tx_timeout,
3208
3209        .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3210        .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3211        .ndo_change_mtu         = liquidio_change_mtu,
3212        .ndo_do_ioctl           = liquidio_ioctl,
3213        .ndo_fix_features       = liquidio_fix_features,
3214        .ndo_set_features       = liquidio_set_features,
3215        .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
3216        .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
3217        .ndo_set_vf_mac         = liquidio_set_vf_mac,
3218        .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3219        .ndo_get_vf_config      = liquidio_get_vf_config,
3220        .ndo_set_vf_spoofchk    = liquidio_set_vf_spoofchk,
3221        .ndo_set_vf_trust       = liquidio_set_vf_trust,
3222        .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3223        .ndo_get_vf_stats       = liquidio_get_vf_stats,
3224        .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3225};
3226
3227/** \brief Entry point for the liquidio module
3228 */
3229static int __init liquidio_init(void)
3230{
3231        int i;
3232        struct handshake *hs;
3233
3234        init_completion(&first_stage);
3235
3236        octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3237
3238        if (liquidio_init_pci())
3239                return -EINVAL;
3240
3241        wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3242
3243        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3244                hs = &handshake[i];
3245                if (hs->pci_dev) {
3246                        wait_for_completion(&hs->init);
3247                        if (!hs->init_ok) {
3248                                /* init handshake failed */
3249                                dev_err(&hs->pci_dev->dev,
3250                                        "Failed to init device\n");
3251                                liquidio_deinit_pci();
3252                                return -EIO;
3253                        }
3254                }
3255        }
3256
3257        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3258                hs = &handshake[i];
3259                if (hs->pci_dev) {
3260                        wait_for_completion_timeout(&hs->started,
3261                                                    msecs_to_jiffies(30000));
3262                        if (!hs->started_ok) {
3263                                /* starter handshake failed */
3264                                dev_err(&hs->pci_dev->dev,
3265                                        "Firmware failed to start\n");
3266                                liquidio_deinit_pci();
3267                                return -EIO;
3268                        }
3269                }
3270        }
3271
3272        return 0;
3273}
3274
3275static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3276{
3277        struct octeon_device *oct = (struct octeon_device *)buf;
3278        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3279        int gmxport = 0;
3280        union oct_link_status *ls;
3281        int i;
3282
3283        if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3284                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3285                        recv_pkt->buffer_size[0],
3286                        recv_pkt->rh.r_nic_info.gmxport);
3287                goto nic_info_err;
3288        }
3289
3290        gmxport = recv_pkt->rh.r_nic_info.gmxport;
3291        ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3292                OCT_DROQ_INFO_SIZE);
3293
3294        octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3295        for (i = 0; i < oct->ifcount; i++) {
3296                if (oct->props[i].gmxport == gmxport) {
3297                        update_link_status(oct->props[i].netdev, ls);
3298                        break;
3299                }
3300        }
3301
3302nic_info_err:
3303        for (i = 0; i < recv_pkt->buffer_count; i++)
3304                recv_buffer_free(recv_pkt->buffer_ptr[i]);
3305        octeon_free_recv_info(recv_info);
3306        return 0;
3307}
3308
3309/**
3310 * \brief Setup network interfaces
3311 * @param octeon_dev  octeon device
3312 *
3313 * Called during init time for each device. It assumes the NIC
3314 * is already up and running.  The link information for each
3315 * interface is passed in link_info.
3316 */
3317static int setup_nic_devices(struct octeon_device *octeon_dev)
3318{
3319        struct lio *lio = NULL;
3320        struct net_device *netdev;
3321        u8 mac[6], i, j, *fw_ver, *micro_ver;
3322        unsigned long micro;
3323        u32 cur_ver;
3324        struct octeon_soft_command *sc;
3325        struct liquidio_if_cfg_resp *resp;
3326        struct octdev_props *props;
3327        int retval, num_iqueues, num_oqueues;
3328        int max_num_queues = 0;
3329        union oct_nic_if_cfg if_cfg;
3330        unsigned int base_queue;
3331        unsigned int gmx_port_id;
3332        u32 resp_size, data_size;
3333        u32 ifidx_or_pfnum;
3334        struct lio_version *vdata;
3335        struct devlink *devlink;
3336        struct lio_devlink_priv *lio_devlink;
3337
3338        /* This is to handle link status changes */
3339        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3340                                    OPCODE_NIC_INFO,
3341                                    lio_nic_info, octeon_dev);
3342
3343        /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3344         * They are handled directly.
3345         */
3346        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3347                                        free_netbuf);
3348
3349        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3350                                        free_netsgbuf);
3351
3352        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3353                                        free_netsgbuf_with_resp);
3354
3355        for (i = 0; i < octeon_dev->ifcount; i++) {
3356                resp_size = sizeof(struct liquidio_if_cfg_resp);
3357                data_size = sizeof(struct lio_version);
3358                sc = (struct octeon_soft_command *)
3359                        octeon_alloc_soft_command(octeon_dev, data_size,
3360                                                  resp_size, 0);
3361                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3362                vdata = (struct lio_version *)sc->virtdptr;
3363
3364                *((u64 *)vdata) = 0;
3365                vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3366                vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3367                vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3368
3369                if (OCTEON_CN23XX_PF(octeon_dev)) {
3370                        num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3371                        num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3372                        base_queue = octeon_dev->sriov_info.pf_srn;
3373
3374                        gmx_port_id = octeon_dev->pf_num;
3375                        ifidx_or_pfnum = octeon_dev->pf_num;
3376                } else {
3377                        num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3378                                                octeon_get_conf(octeon_dev), i);
3379                        num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3380                                                octeon_get_conf(octeon_dev), i);
3381                        base_queue = CFG_GET_BASE_QUE_NIC_IF(
3382                                                octeon_get_conf(octeon_dev), i);
3383                        gmx_port_id = CFG_GET_GMXID_NIC_IF(
3384                                                octeon_get_conf(octeon_dev), i);
3385                        ifidx_or_pfnum = i;
3386                }
3387
3388                dev_dbg(&octeon_dev->pci_dev->dev,
3389                        "requesting config for interface %d, iqs %d, oqs %d\n",
3390                        ifidx_or_pfnum, num_iqueues, num_oqueues);
3391
3392                if_cfg.u64 = 0;
3393                if_cfg.s.num_iqueues = num_iqueues;
3394                if_cfg.s.num_oqueues = num_oqueues;
3395                if_cfg.s.base_queue = base_queue;
3396                if_cfg.s.gmx_port_id = gmx_port_id;
3397
3398                sc->iq_no = 0;
3399
3400                octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3401                                            OPCODE_NIC_IF_CFG, 0,
3402                                            if_cfg.u64, 0);
3403
3404                init_completion(&sc->complete);
3405                sc->sc_status = OCTEON_REQUEST_PENDING;
3406
3407                retval = octeon_send_soft_command(octeon_dev, sc);
3408                if (retval == IQ_SEND_FAILED) {
3409                        dev_err(&octeon_dev->pci_dev->dev,
3410                                "iq/oq config failed status: %x\n",
3411                                retval);
3412                        /* Soft instr is freed by driver in case of failure. */
3413                        octeon_free_soft_command(octeon_dev, sc);
3414                        return(-EIO);
3415                }
3416
3417                /* Sleep on a wait queue till the cond flag indicates that the
3418                 * response arrived or timed-out.
3419                 */
3420                retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3421                if (retval)
3422                        return retval;
3423
3424                retval = resp->status;
3425                if (retval) {
3426                        dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3427                        WRITE_ONCE(sc->caller_is_done, true);
3428                        goto setup_nic_dev_done;
3429                }
3430                snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3431                         32, "%s",
3432                         resp->cfg_info.liquidio_firmware_version);
3433
3434                /* Verify f/w version (in case of 'auto' loading from flash) */
3435                fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3436                if (memcmp(LIQUIDIO_BASE_VERSION,
3437                           fw_ver,
3438                           strlen(LIQUIDIO_BASE_VERSION))) {
3439                        dev_err(&octeon_dev->pci_dev->dev,
3440                                "Unmatched firmware version. Expected %s.x, got %s.\n",
3441                                LIQUIDIO_BASE_VERSION, fw_ver);
3442                        WRITE_ONCE(sc->caller_is_done, true);
3443                        goto setup_nic_dev_done;
3444                } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3445                           FW_IS_PRELOADED) {
3446                        dev_info(&octeon_dev->pci_dev->dev,
3447                                 "Using auto-loaded firmware version %s.\n",
3448                                 fw_ver);
3449                }
3450
3451                /* extract micro version field; point past '<maj>.<min>.' */
3452                micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3453                if (kstrtoul(micro_ver, 10, &micro) != 0)
3454                        micro = 0;
3455                octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3456                octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3457                octeon_dev->fw_info.ver.rev = micro;
3458
3459                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3460                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3461
3462                num_iqueues = hweight64(resp->cfg_info.iqmask);
3463                num_oqueues = hweight64(resp->cfg_info.oqmask);
3464
3465                if (!(num_iqueues) || !(num_oqueues)) {
3466                        dev_err(&octeon_dev->pci_dev->dev,
3467                                "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3468                                resp->cfg_info.iqmask,
3469                                resp->cfg_info.oqmask);
3470                        WRITE_ONCE(sc->caller_is_done, true);
3471                        goto setup_nic_dev_done;
3472                }
3473
3474                if (OCTEON_CN6XXX(octeon_dev)) {
3475                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3476                                                                    cn6xxx));
3477                } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3478                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3479                                                                    cn23xx_pf));
3480                }
3481
3482                dev_dbg(&octeon_dev->pci_dev->dev,
3483                        "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3484                        i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3485                        num_iqueues, num_oqueues, max_num_queues);
3486                netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3487
3488                if (!netdev) {
3489                        dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3490                        WRITE_ONCE(sc->caller_is_done, true);
3491                        goto setup_nic_dev_done;
3492                }
3493
3494                SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3495
3496                /* Associate the routines that will handle different
3497                 * netdev tasks.
3498                 */
3499                netdev->netdev_ops = &lionetdevops;
3500
3501                retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3502                if (retval) {
3503                        dev_err(&octeon_dev->pci_dev->dev,
3504                                "setting real number rx failed\n");
3505                        WRITE_ONCE(sc->caller_is_done, true);
3506                        goto setup_nic_dev_free;
3507                }
3508
3509                retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3510                if (retval) {
3511                        dev_err(&octeon_dev->pci_dev->dev,
3512                                "setting real number tx failed\n");
3513                        WRITE_ONCE(sc->caller_is_done, true);
3514                        goto setup_nic_dev_free;
3515                }
3516
3517                lio = GET_LIO(netdev);
3518
3519                memset(lio, 0, sizeof(struct lio));
3520
3521                lio->ifidx = ifidx_or_pfnum;
3522
3523                props = &octeon_dev->props[i];
3524                props->gmxport = resp->cfg_info.linfo.gmxport;
3525                props->netdev = netdev;
3526
3527                lio->linfo.num_rxpciq = num_oqueues;
3528                lio->linfo.num_txpciq = num_iqueues;
3529                for (j = 0; j < num_oqueues; j++) {
3530                        lio->linfo.rxpciq[j].u64 =
3531                                resp->cfg_info.linfo.rxpciq[j].u64;
3532                }
3533                for (j = 0; j < num_iqueues; j++) {
3534                        lio->linfo.txpciq[j].u64 =
3535                                resp->cfg_info.linfo.txpciq[j].u64;
3536                }
3537                lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3538                lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3539                lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3540
3541                WRITE_ONCE(sc->caller_is_done, true);
3542
3543                lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3544
3545                if (OCTEON_CN23XX_PF(octeon_dev) ||
3546                    OCTEON_CN6XXX(octeon_dev)) {
3547                        lio->dev_capability = NETIF_F_HIGHDMA
3548                                              | NETIF_F_IP_CSUM
3549                                              | NETIF_F_IPV6_CSUM
3550                                              | NETIF_F_SG | NETIF_F_RXCSUM
3551                                              | NETIF_F_GRO
3552                                              | NETIF_F_TSO | NETIF_F_TSO6
3553                                              | NETIF_F_LRO;
3554                }
3555                netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3556
3557                /*  Copy of transmit encapsulation capabilities:
3558                 *  TSO, TSO6, Checksums for this device
3559                 */
3560                lio->enc_dev_capability = NETIF_F_IP_CSUM
3561                                          | NETIF_F_IPV6_CSUM
3562                                          | NETIF_F_GSO_UDP_TUNNEL
3563                                          | NETIF_F_HW_CSUM | NETIF_F_SG
3564                                          | NETIF_F_RXCSUM
3565                                          | NETIF_F_TSO | NETIF_F_TSO6
3566                                          | NETIF_F_LRO;
3567
3568                netdev->hw_enc_features = (lio->enc_dev_capability &
3569                                           ~NETIF_F_LRO);
3570
3571                netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3572
3573                lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3574
3575                netdev->vlan_features = lio->dev_capability;
3576                /* Add any unchangeable hw features */
3577                lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3578                                        NETIF_F_HW_VLAN_CTAG_RX |
3579                                        NETIF_F_HW_VLAN_CTAG_TX;
3580
3581                netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3582
3583                netdev->hw_features = lio->dev_capability;
3584                /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3585                netdev->hw_features = netdev->hw_features &
3586                        ~NETIF_F_HW_VLAN_CTAG_RX;
3587
3588                /* MTU range: 68 - 16000 */
3589                netdev->min_mtu = LIO_MIN_MTU_SIZE;
3590                netdev->max_mtu = LIO_MAX_MTU_SIZE;
3591
3592                /* Point to the  properties for octeon device to which this
3593                 * interface belongs.
3594                 */
3595                lio->oct_dev = octeon_dev;
3596                lio->octprops = props;
3597                lio->netdev = netdev;
3598
3599                dev_dbg(&octeon_dev->pci_dev->dev,
3600                        "if%d gmx: %d hw_addr: 0x%llx\n", i,
3601                        lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3602
3603                for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3604                        u8 vfmac[ETH_ALEN];
3605
3606                        eth_random_addr(vfmac);
3607                        if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3608                                dev_err(&octeon_dev->pci_dev->dev,
3609                                        "Error setting VF%d MAC address\n",
3610                                        j);
3611                                goto setup_nic_dev_free;
3612                        }
3613                }
3614
3615                /* 64-bit swap required on LE machines */
3616                octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3617                for (j = 0; j < 6; j++)
3618                        mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3619
3620                /* Copy MAC Address to OS network device structure */
3621
3622                ether_addr_copy(netdev->dev_addr, mac);
3623
3624                /* By default all interfaces on a single Octeon uses the same
3625                 * tx and rx queues
3626                 */
3627                lio->txq = lio->linfo.txpciq[0].s.q_no;
3628                lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3629                if (liquidio_setup_io_queues(octeon_dev, i,
3630                                             lio->linfo.num_txpciq,
3631                                             lio->linfo.num_rxpciq)) {
3632                        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3633                        goto setup_nic_dev_free;
3634                }
3635
3636                ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3637
3638                lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3639                lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3640
3641                if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3642                        dev_err(&octeon_dev->pci_dev->dev,
3643                                "Gather list allocation failed\n");
3644                        goto setup_nic_dev_free;
3645                }
3646
3647                /* Register ethtool support */
3648                liquidio_set_ethtool_ops(netdev);
3649                if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3650                        octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3651                else
3652                        octeon_dev->priv_flags = 0x0;
3653
3654                if (netdev->features & NETIF_F_LRO)
3655                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3656                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3657
3658                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3659                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
3660
3661                if ((debug != -1) && (debug & NETIF_MSG_HW))
3662                        liquidio_set_feature(netdev,
3663                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
3664
3665                if (setup_link_status_change_wq(netdev))
3666                        goto setup_nic_dev_free;
3667
3668                if ((octeon_dev->fw_info.app_cap_flags &
3669                     LIQUIDIO_TIME_SYNC_CAP) &&
3670                    setup_sync_octeon_time_wq(netdev))
3671                        goto setup_nic_dev_free;
3672
3673                if (setup_rx_oom_poll_fn(netdev))
3674                        goto setup_nic_dev_free;
3675
3676                /* Register the network device with the OS */
3677                if (register_netdev(netdev)) {
3678                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3679                        goto setup_nic_dev_free;
3680                }
3681
3682                dev_dbg(&octeon_dev->pci_dev->dev,
3683                        "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3684                        i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3685                netif_carrier_off(netdev);
3686                lio->link_changes++;
3687
3688                ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3689
3690                /* Sending command to firmware to enable Rx checksum offload
3691                 * by default at the time of setup of Liquidio driver for
3692                 * this device
3693                 */
3694                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3695                                            OCTNET_CMD_RXCSUM_ENABLE);
3696                liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3697                                     OCTNET_CMD_TXCSUM_ENABLE);
3698
3699                dev_dbg(&octeon_dev->pci_dev->dev,
3700                        "NIC ifidx:%d Setup successful\n", i);
3701
3702                if (octeon_dev->subsystem_id ==
3703                        OCTEON_CN2350_25GB_SUBSYS_ID ||
3704                    octeon_dev->subsystem_id ==
3705                        OCTEON_CN2360_25GB_SUBSYS_ID) {
3706                        cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3707                                             octeon_dev->fw_info.ver.min,
3708                                             octeon_dev->fw_info.ver.rev);
3709
3710                        /* speed control unsupported in f/w older than 1.7.2 */
3711                        if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3712                                dev_info(&octeon_dev->pci_dev->dev,
3713                                         "speed setting not supported by f/w.");
3714                                octeon_dev->speed_setting = 25;
3715                                octeon_dev->no_speed_setting = 1;
3716                        } else {
3717                                liquidio_get_speed(lio);
3718                        }
3719
3720                        if (octeon_dev->speed_setting == 0) {
3721                                octeon_dev->speed_setting = 25;
3722                                octeon_dev->no_speed_setting = 1;
3723                        }
3724                } else {
3725                        octeon_dev->no_speed_setting = 1;
3726                        octeon_dev->speed_setting = 10;
3727                }
3728                octeon_dev->speed_boot = octeon_dev->speed_setting;
3729
3730                /* don't read FEC setting if unsupported by f/w (see above) */
3731                if (octeon_dev->speed_boot == 25 &&
3732                    !octeon_dev->no_speed_setting) {
3733                        liquidio_get_fec(lio);
3734                        octeon_dev->props[lio->ifidx].fec_boot =
3735                                octeon_dev->props[lio->ifidx].fec;
3736                }
3737        }
3738
3739        devlink = devlink_alloc(&liquidio_devlink_ops,
3740                                sizeof(struct lio_devlink_priv));
3741        if (!devlink) {
3742                dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3743                goto setup_nic_dev_free;
3744        }
3745
3746        lio_devlink = devlink_priv(devlink);
3747        lio_devlink->oct = octeon_dev;
3748
3749        if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3750                devlink_free(devlink);
3751                dev_err(&octeon_dev->pci_dev->dev,
3752                        "devlink registration failed\n");
3753                goto setup_nic_dev_free;
3754        }
3755
3756        octeon_dev->devlink = devlink;
3757        octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3758
3759        return 0;
3760
3761setup_nic_dev_free:
3762
3763        while (i--) {
3764                dev_err(&octeon_dev->pci_dev->dev,
3765                        "NIC ifidx:%d Setup failed\n", i);
3766                liquidio_destroy_nic_device(octeon_dev, i);
3767        }
3768
3769setup_nic_dev_done:
3770
3771        return -ENODEV;
3772}
3773
3774#ifdef CONFIG_PCI_IOV
3775static int octeon_enable_sriov(struct octeon_device *oct)
3776{
3777        unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3778        struct pci_dev *vfdev;
3779        int err;
3780        u32 u;
3781
3782        if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3783                err = pci_enable_sriov(oct->pci_dev,
3784                                       oct->sriov_info.num_vfs_alloced);
3785                if (err) {
3786                        dev_err(&oct->pci_dev->dev,
3787                                "OCTEON: Failed to enable PCI sriov: %d\n",
3788                                err);
3789                        oct->sriov_info.num_vfs_alloced = 0;
3790                        return err;
3791                }
3792                oct->sriov_info.sriov_enabled = 1;
3793
3794                /* init lookup table that maps DPI ring number to VF pci_dev
3795                 * struct pointer
3796                 */
3797                u = 0;
3798                vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3799                                       OCTEON_CN23XX_VF_VID, NULL);
3800                while (vfdev) {
3801                        if (vfdev->is_virtfn &&
3802                            (vfdev->physfn == oct->pci_dev)) {
3803                                oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3804                                        vfdev;
3805                                u += oct->sriov_info.rings_per_vf;
3806                        }
3807                        vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3808                                               OCTEON_CN23XX_VF_VID, vfdev);
3809                }
3810        }
3811
3812        return num_vfs_alloced;
3813}
3814
3815static int lio_pci_sriov_disable(struct octeon_device *oct)
3816{
3817        int u;
3818
3819        if (pci_vfs_assigned(oct->pci_dev)) {
3820                dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3821                return -EPERM;
3822        }
3823
3824        pci_disable_sriov(oct->pci_dev);
3825
3826        u = 0;
3827        while (u < MAX_POSSIBLE_VFS) {
3828                oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3829                u += oct->sriov_info.rings_per_vf;
3830        }
3831
3832        oct->sriov_info.num_vfs_alloced = 0;
3833        dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3834                 oct->pf_num);
3835
3836        return 0;
3837}
3838
3839static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3840{
3841        struct octeon_device *oct = pci_get_drvdata(dev);
3842        int ret = 0;
3843
3844        if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3845            (oct->sriov_info.sriov_enabled)) {
3846                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3847                         oct->pf_num, num_vfs);
3848                return 0;
3849        }
3850
3851        if (!num_vfs) {
3852                lio_vf_rep_destroy(oct);
3853                ret = lio_pci_sriov_disable(oct);
3854        } else if (num_vfs > oct->sriov_info.max_vfs) {
3855                dev_err(&oct->pci_dev->dev,
3856                        "OCTEON: Max allowed VFs:%d user requested:%d",
3857                        oct->sriov_info.max_vfs, num_vfs);
3858                ret = -EPERM;
3859        } else {
3860                oct->sriov_info.num_vfs_alloced = num_vfs;
3861                ret = octeon_enable_sriov(oct);
3862                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3863                         oct->pf_num, num_vfs);
3864                ret = lio_vf_rep_create(oct);
3865                if (ret)
3866                        dev_info(&oct->pci_dev->dev,
3867                                 "vf representor create failed");
3868        }
3869
3870        return ret;
3871}
3872#endif
3873
3874/**
3875 * \brief initialize the NIC
3876 * @param oct octeon device
3877 *
3878 * This initialization routine is called once the Octeon device application is
3879 * up and running
3880 */
3881static int liquidio_init_nic_module(struct octeon_device *oct)
3882{
3883        int i, retval = 0;
3884        int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3885
3886        dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3887
3888        /* only default iq and oq were initialized
3889         * initialize the rest as well
3890         */
3891        /* run port_config command for each port */
3892        oct->ifcount = num_nic_ports;
3893
3894        memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3895
3896        for (i = 0; i < MAX_OCTEON_LINKS; i++)
3897                oct->props[i].gmxport = -1;
3898
3899        retval = setup_nic_devices(oct);
3900        if (retval) {
3901                dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3902                goto octnet_init_failure;
3903        }
3904
3905        /* Call vf_rep_modinit if the firmware is switchdev capable
3906         * and do it from the first liquidio function probed.
3907         */
3908        if (!oct->octeon_id &&
3909            oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3910                retval = lio_vf_rep_modinit();
3911                if (retval) {
3912                        liquidio_stop_nic_module(oct);
3913                        goto octnet_init_failure;
3914                }
3915        }
3916
3917        liquidio_ptp_init(oct);
3918
3919        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3920
3921        return retval;
3922
3923octnet_init_failure:
3924
3925        oct->ifcount = 0;
3926
3927        return retval;
3928}
3929
3930/**
3931 * \brief starter callback that invokes the remaining initialization work after
3932 * the NIC is up and running.
3933 * @param octptr  work struct work_struct
3934 */
3935static void nic_starter(struct work_struct *work)
3936{
3937        struct octeon_device *oct;
3938        struct cavium_wk *wk = (struct cavium_wk *)work;
3939
3940        oct = (struct octeon_device *)wk->ctxptr;
3941
3942        if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3943                return;
3944
3945        /* If the status of the device is CORE_OK, the core
3946         * application has reported its application type. Call
3947         * any registered handlers now and move to the RUNNING
3948         * state.
3949         */
3950        if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3951                schedule_delayed_work(&oct->nic_poll_work.work,
3952                                      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3953                return;
3954        }
3955
3956        atomic_set(&oct->status, OCT_DEV_RUNNING);
3957
3958        if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3959                dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3960
3961                if (liquidio_init_nic_module(oct))
3962                        dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3963                else
3964                        handshake[oct->octeon_id].started_ok = 1;
3965        } else {
3966                dev_err(&oct->pci_dev->dev,
3967                        "Unexpected application running on NIC (%d). Check firmware.\n",
3968                        oct->app_mode);
3969        }
3970
3971        complete(&handshake[oct->octeon_id].started);
3972}
3973
3974static int
3975octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3976{
3977        struct octeon_device *oct = (struct octeon_device *)buf;
3978        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3979        int i, notice, vf_idx;
3980        bool cores_crashed;
3981        u64 *data, vf_num;
3982
3983        notice = recv_pkt->rh.r.ossp;
3984        data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3985
3986        /* the first 64-bit word of data is the vf_num */
3987        vf_num = data[0];
3988        octeon_swap_8B_data(&vf_num, 1);
3989        vf_idx = (int)vf_num - 1;
3990
3991        cores_crashed = READ_ONCE(oct->cores_crashed);
3992
3993        if (notice == VF_DRV_LOADED) {
3994                if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3995                        oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3996                        dev_info(&oct->pci_dev->dev,
3997                                 "driver for VF%d was loaded\n", vf_idx);
3998                        if (!cores_crashed)
3999                                try_module_get(THIS_MODULE);
4000                }
4001        } else if (notice == VF_DRV_REMOVED) {
4002                if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4003                        oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4004                        dev_info(&oct->pci_dev->dev,
4005                                 "driver for VF%d was removed\n", vf_idx);
4006                        if (!cores_crashed)
4007                                module_put(THIS_MODULE);
4008                }
4009        } else if (notice == VF_DRV_MACADDR_CHANGED) {
4010                u8 *b = (u8 *)&data[1];
4011
4012                oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4013                dev_info(&oct->pci_dev->dev,
4014                         "VF driver changed VF%d's MAC address to %pM\n",
4015                         vf_idx, b + 2);
4016        }
4017
4018        for (i = 0; i < recv_pkt->buffer_count; i++)
4019                recv_buffer_free(recv_pkt->buffer_ptr[i]);
4020        octeon_free_recv_info(recv_info);
4021
4022        return 0;
4023}
4024
4025/**
4026 * \brief Device initialization for each Octeon device that is probed
4027 * @param octeon_dev  octeon device
4028 */
4029static int octeon_device_init(struct octeon_device *octeon_dev)
4030{
4031        int j, ret;
4032        char bootcmd[] = "\n";
4033        char *dbg_enb = NULL;
4034        enum lio_fw_state fw_state;
4035        struct octeon_device_priv *oct_priv =
4036                (struct octeon_device_priv *)octeon_dev->priv;
4037        atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4038
4039        /* Enable access to the octeon device and make its DMA capability
4040         * known to the OS.
4041         */
4042        if (octeon_pci_os_setup(octeon_dev))
4043                return 1;
4044
4045        atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4046
4047        /* Identify the Octeon type and map the BAR address space. */
4048        if (octeon_chip_specific_setup(octeon_dev)) {
4049                dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4050                return 1;
4051        }
4052
4053        atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4054
4055        /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4056         * since that is what is required for the reference to be removed
4057         * during de-initialization (see 'octeon_destroy_resources').
4058         */
4059        octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4060                               PCI_SLOT(octeon_dev->pci_dev->devfn),
4061                               PCI_FUNC(octeon_dev->pci_dev->devfn),
4062                               true);
4063
4064        octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4065
4066        /* CN23XX supports preloaded firmware if the following is true:
4067         *
4068         * The adapter indicates that firmware is currently running AND
4069         * 'fw_type' is 'auto'.
4070         *
4071         * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4072         */
4073        if (OCTEON_CN23XX_PF(octeon_dev) &&
4074            cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4075                atomic_cmpxchg(octeon_dev->adapter_fw_state,
4076                               FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4077        }
4078
4079        /* If loading firmware, only first device of adapter needs to do so. */
4080        fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4081                                  FW_NEEDS_TO_BE_LOADED,
4082                                  FW_IS_BEING_LOADED);
4083
4084        /* Here, [local variable] 'fw_state' is set to one of:
4085         *
4086         *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4087         *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4088         *                          firmware to the adapter.
4089         *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4090         *                          firmware to the adapter.
4091         */
4092
4093        /* Prior to f/w load, perform a soft reset of the Octeon device;
4094         * if error resetting, return w/error.
4095         */
4096        if (fw_state == FW_NEEDS_TO_BE_LOADED)
4097                if (octeon_dev->fn_list.soft_reset(octeon_dev))
4098                        return 1;
4099
4100        /* Initialize the dispatch mechanism used to push packets arriving on
4101         * Octeon Output queues.
4102         */
4103        if (octeon_init_dispatch_list(octeon_dev))
4104                return 1;
4105
4106        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4107                                    OPCODE_NIC_CORE_DRV_ACTIVE,
4108                                    octeon_core_drv_init,
4109                                    octeon_dev);
4110
4111        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4112                                    OPCODE_NIC_VF_DRV_NOTICE,
4113                                    octeon_recv_vf_drv_notice, octeon_dev);
4114        INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4115        octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4116        schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4117                              LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4118
4119        atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4120
4121        if (octeon_set_io_queues_off(octeon_dev)) {
4122                dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4123                return 1;
4124        }
4125
4126        if (OCTEON_CN23XX_PF(octeon_dev)) {
4127                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4128                if (ret) {
4129                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4130                        return ret;
4131                }
4132        }
4133
4134        /* Initialize soft command buffer pool
4135         */
4136        if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4137                dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4138                return 1;
4139        }
4140        atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4141
4142        /*  Setup the data structures that manage this Octeon's Input queues. */
4143        if (octeon_setup_instr_queues(octeon_dev)) {
4144                dev_err(&octeon_dev->pci_dev->dev,
4145                        "instruction queue initialization failed\n");
4146                return 1;
4147        }
4148        atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4149
4150        /* Initialize lists to manage the requests of different types that
4151         * arrive from user & kernel applications for this octeon device.
4152         */
4153        if (octeon_setup_response_list(octeon_dev)) {
4154                dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4155                return 1;
4156        }
4157        atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4158
4159        if (octeon_setup_output_queues(octeon_dev)) {
4160                dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4161                return 1;
4162        }
4163
4164        atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4165
4166        if (OCTEON_CN23XX_PF(octeon_dev)) {
4167                if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4168                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4169                        return 1;
4170                }
4171                atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4172
4173                if (octeon_allocate_ioq_vector
4174                                (octeon_dev,
4175                                 octeon_dev->sriov_info.num_pf_rings)) {
4176                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4177                        return 1;
4178                }
4179                atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4180
4181        } else {
4182                /* The input and output queue registers were setup earlier (the
4183                 * queues were not enabled). Any additional registers
4184                 * that need to be programmed should be done now.
4185                 */
4186                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4187                if (ret) {
4188                        dev_err(&octeon_dev->pci_dev->dev,
4189                                "Failed to configure device registers\n");
4190                        return ret;
4191                }
4192        }
4193
4194        /* Initialize the tasklet that handles output queue packet processing.*/
4195        dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4196        tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4197                     (unsigned long)octeon_dev);
4198
4199        /* Setup the interrupt handler and record the INT SUM register address
4200         */
4201        if (octeon_setup_interrupt(octeon_dev,
4202                                   octeon_dev->sriov_info.num_pf_rings))
4203                return 1;
4204
4205        /* Enable Octeon device interrupts */
4206        octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4207
4208        atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4209
4210        /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4211         * the output queue is enabled.
4212         * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4213         * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4214         * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4215         * before any credits have been issued, causing the ring to be reset
4216         * (and the f/w appear to never have started).
4217         */
4218        for (j = 0; j < octeon_dev->num_oqs; j++)
4219                writel(octeon_dev->droq[j]->max_count,
4220                       octeon_dev->droq[j]->pkts_credit_reg);
4221
4222        /* Enable the input and output queues for this Octeon device */
4223        ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4224        if (ret) {
4225                dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4226                return ret;
4227        }
4228
4229        atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4230
4231        if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4232                dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4233                if (!ddr_timeout) {
4234                        dev_info(&octeon_dev->pci_dev->dev,
4235                                 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4236                }
4237
4238                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4239
4240                /* Wait for the octeon to initialize DDR after the soft-reset.*/
4241                while (!ddr_timeout) {
4242                        set_current_state(TASK_INTERRUPTIBLE);
4243                        if (schedule_timeout(HZ / 10)) {
4244                                /* user probably pressed Control-C */
4245                                return 1;
4246                        }
4247                }
4248                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4249                if (ret) {
4250                        dev_err(&octeon_dev->pci_dev->dev,
4251                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4252                                ret);
4253                        return 1;
4254                }
4255
4256                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4257                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4258                        return 1;
4259                }
4260
4261                /* Divert uboot to take commands from host instead. */
4262                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4263
4264                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4265                ret = octeon_init_consoles(octeon_dev);
4266                if (ret) {
4267                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4268                        return 1;
4269                }
4270                /* If console debug enabled, specify empty string to use default
4271                 * enablement ELSE specify NULL string for 'disabled'.
4272                 */
4273                dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4274                ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4275                if (ret) {
4276                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4277                        return 1;
4278                } else if (octeon_console_debug_enabled(0)) {
4279                        /* If console was added AND we're logging console output
4280                         * then set our console print function.
4281                         */
4282                        octeon_dev->console[0].print = octeon_dbg_console_print;
4283                }
4284
4285                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4286
4287                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4288                ret = load_firmware(octeon_dev);
4289                if (ret) {
4290                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4291                        return 1;
4292                }
4293
4294                atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4295        }
4296
4297        handshake[octeon_dev->octeon_id].init_ok = 1;
4298        complete(&handshake[octeon_dev->octeon_id].init);
4299
4300        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4301
4302        return 0;
4303}
4304
4305/**
4306 * \brief Debug console print function
4307 * @param octeon_dev  octeon device
4308 * @param console_num console number
4309 * @param prefix      first portion of line to display
4310 * @param suffix      second portion of line to display
4311 *
4312 * The OCTEON debug console outputs entire lines (excluding '\n').
4313 * Normally, the line will be passed in the 'prefix' parameter.
4314 * However, due to buffering, it is possible for a line to be split into two
4315 * parts, in which case they will be passed as the 'prefix' parameter and
4316 * 'suffix' parameter.
4317 */
4318static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4319                                    char *prefix, char *suffix)
4320{
4321        if (prefix && suffix)
4322                dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4323                         suffix);
4324        else if (prefix)
4325                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4326        else if (suffix)
4327                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4328
4329        return 0;
4330}
4331
4332/**
4333 * \brief Exits the module
4334 */
4335static void __exit liquidio_exit(void)
4336{
4337        liquidio_deinit_pci();
4338
4339        pr_info("LiquidIO network module is now unloaded\n");
4340}
4341
4342module_init(liquidio_init);
4343module_exit(liquidio_exit);
4344