linux/drivers/net/ethernet/cavium/liquidio/lio_main.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns  1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
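
/* Example (hypothetical values): loading the module with
 * console_bitmask=0x5 redirects consoles 0 and 2 to syslog, so
 * octeon_console_debug_enabled() returns 1 for consoles 0 and 2,
 * and 0 for console 1.
 */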

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000
/* Update localtime to octeon firmware every 60 seconds.
 * Making the firmware use the same time reference makes it easy to
 * correlate firmware-logged events/errors with host events when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
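
/* The union above overlays gso_size/gso_segs on one 64-bit word so the TX
 * path can pass GSO parameters around as a single u64; the #ifdef keeps
 * the in-memory field order matching the device's view on both big- and
 * little-endian hosts.
 */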

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
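
/* Quiesce ordering used above: queues are forced off first, in-flight
 * requests get WAIT_INFLIGHT_REQUEST to drain, instructions still queued
 * are retired by advancing octeon_read_index to host_write_index, and
 * lio_process_ordered_list(oct, 1) then times out whatever remains.
 */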

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;	/* Clear corresponding nonfatal bits */
	else
		status &= mask;		/* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
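
/* Worked example (illustrative values): the AER uncorrectable status
 * register is write-1-to-clear. With status = 0x30 and severity = 0x20,
 * the io_normal path writes status & ~severity = 0x10 (clearing the
 * nonfatal bit), while the fatal path writes status & severity = 0x20.
 */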

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery,
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns number of queues woken up; 0 if none were woken (e.g. all full)
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}
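
/* Illustrative mapping (hypothetical sizes): with real_num_tx_queues = 8
 * and num_iqs = 4, subqueues 0..7 map onto txpciq[0..3] twice via
 * q % num_iqs, i.e. two netdev subqueues share one hardware input queue.
 */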

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
	 * interface. This API is invoked only when the new max-MTU of the
	 * interface is less than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}
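
/* The MTU reduction above is deferred to link_status_wq rather than done
 * inline, presumably so octnet_link_status_change() can take rtnl_lock()
 * around dev_set_mtu() from process context.
 */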

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
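
/* Soft-command lifetime: on IQ_SEND_FAILED the command is freed right
 * away; on success caller_is_done lets the response path reclaim it.
 * Either way the work re-queues itself every
 * LIO_SYNC_OCTEON_TIME_INTERVAL_MS (60 seconds).
 */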

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
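
/* The "other" device is the adapter's companion PF: two consecutive
 * octeon_id values that share a PCI bus number and slot are assumed to be
 * the two PFs of the same physical NIC.
 */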

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
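
/* Refcount note: each bound VF driver holds a reference on this PF module
 * (tracked via vf_drv_loaded_mask). After a core crash, the loop above
 * drops one module reference per VF so the wedged PF module can still be
 * unloaded.
 */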

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if ((pdev->device == OCTEON_CN66XX) ||
	    (pdev->device == OCTEON_CN68XX))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread.  The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

	/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

	/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

	/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

	/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}			/* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
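
/* The switch above intentionally falls through from the device's current
 * state down to OCT_DEV_BEGIN_STATE, undoing the init stages in reverse
 * order; a device that failed early enters the switch at a lower state and
 * skips the later-stage cleanup.
 */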

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command\n");
		return;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
		return;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by the driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
1375static int octeon_chip_specific_setup(struct octeon_device *oct)
1376{
1377        u32 dev_id, rev_id;
1378        int ret = 1;
1379        char *s;
1380
1381        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1382        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1383        oct->rev_id = rev_id & 0xff;
1384
1385        switch (dev_id) {
1386        case OCTEON_CN68XX_PCIID:
1387                oct->chip_id = OCTEON_CN68XX;
1388                ret = lio_setup_cn68xx_octeon_device(oct);
1389                s = "CN68XX";
1390                break;
1391
1392        case OCTEON_CN66XX_PCIID:
1393                oct->chip_id = OCTEON_CN66XX;
1394                ret = lio_setup_cn66xx_octeon_device(oct);
1395                s = "CN66XX";
1396                break;
1397
1398        case OCTEON_CN23XX_PCIID_PF:
1399                oct->chip_id = OCTEON_CN23XX_PF_VID;
1400                ret = setup_cn23xx_octeon_pf_device(oct);
1401                if (ret)
1402                        break;
1403#ifdef CONFIG_PCI_IOV
1404                if (!ret)
1405                        pci_sriov_set_totalvfs(oct->pci_dev,
1406                                               oct->sriov_info.max_vfs);
1407#endif
1408                s = "CN23XX";
1409                break;
1410
1411        default:
1412                s = "?";
1413                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1414                        dev_id);
1415        }
1416
1417        if (!ret)
1418                dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1419                         OCTEON_MAJOR_REV(oct),
1420                         OCTEON_MINOR_REV(oct),
1421                         octeon_get_conf(oct)->card_name,
1422                         LIQUIDIO_VERSION);
1423
1424        return ret;
1425}
1426
1427/**
1428 * \brief PCI initialization for each Octeon device.
1429 * @param oct octeon device
1430 */
1431static int octeon_pci_os_setup(struct octeon_device *oct)
1432{
1433        /* setup PCI stuff first */
1434        if (pci_enable_device(oct->pci_dev)) {
1435                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1436                return 1;
1437        }
1438
1439        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1440                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1441                pci_disable_device(oct->pci_dev);
1442                return 1;
1443        }
1444
1445        /* Enable PCI DMA Master. */
1446        pci_set_master(oct->pci_dev);
1447
1448        return 0;
1449}
1450
1451/**
1452 * \brief Unmap and free network buffer
1453 * @param buf buffer
1454 */
1455static void free_netbuf(void *buf)
1456{
1457        struct sk_buff *skb;
1458        struct octnet_buf_free_info *finfo;
1459        struct lio *lio;
1460
1461        finfo = (struct octnet_buf_free_info *)buf;
1462        skb = finfo->skb;
1463        lio = finfo->lio;
1464
1465        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1466                         DMA_TO_DEVICE);
1467
1468        tx_buffer_free(skb);
1469}
1470
1471/**
1472 * \brief Unmap and free gather buffer
1473 * @param buf buffer
1474 */
1475static void free_netsgbuf(void *buf)
1476{
1477        struct octnet_buf_free_info *finfo;
1478        struct sk_buff *skb;
1479        struct lio *lio;
1480        struct octnic_gather *g;
1481        int i, frags, iq;
1482
1483        finfo = (struct octnet_buf_free_info *)buf;
1484        skb = finfo->skb;
1485        lio = finfo->lio;
1486        g = finfo->g;
1487        frags = skb_shinfo(skb)->nr_frags;
1488
1489        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1490                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1491                         DMA_TO_DEVICE);
1492
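            /* Each octeon_sg entry carries four pointer/size pairs, so
             * fragment i lives at entry (i >> 2), slot (i & 3); entry 0,
             * slot 0 holds the linear part unmapped just above.
             */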
1493        i = 1;
1494        while (frags--) {
1495                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1496
1497                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1498                               g->sg[(i >> 2)].ptr[(i & 3)],
1499                               frag->size, DMA_TO_DEVICE);
1500                i++;
1501        }
1502
1503        iq = skb_iq(lio->oct_dev, skb);
1504        spin_lock(&lio->glist_lock[iq]);
1505        list_add_tail(&g->list, &lio->glist[iq]);
1506        spin_unlock(&lio->glist_lock[iq]);
1507
1508        tx_buffer_free(skb);
1509}
1510
1511/**
1512 * \brief Unmap and free gather buffer with response
1513 * @param buf buffer
1514 */
1515static void free_netsgbuf_with_resp(void *buf)
1516{
1517        struct octeon_soft_command *sc;
1518        struct octnet_buf_free_info *finfo;
1519        struct sk_buff *skb;
1520        struct lio *lio;
1521        struct octnic_gather *g;
1522        int i, frags, iq;
1523
1524        sc = (struct octeon_soft_command *)buf;
1525        skb = (struct sk_buff *)sc->callback_arg;
1526        finfo = (struct octnet_buf_free_info *)&skb->cb;
1527
1528        lio = finfo->lio;
1529        g = finfo->g;
1530        frags = skb_shinfo(skb)->nr_frags;
1531
1532        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1533                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1534                         DMA_TO_DEVICE);
1535
1536        i = 1;
1537        while (frags--) {
1538                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1539
1540                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1541                               g->sg[(i >> 2)].ptr[(i & 3)],
1542                               frag->size, DMA_TO_DEVICE);
1543                i++;
1544        }
1545
1546        iq = skb_iq(lio->oct_dev, skb);
1547
1548        spin_lock(&lio->glist_lock[iq]);
1549        list_add_tail(&g->list, &lio->glist[iq]);
1550        spin_unlock(&lio->glist_lock[iq]);
1551
1552        /* Don't free the skb yet */
1553}
1554
1555/**
1556 * \brief Adjust ptp frequency
1557 * @param ptp PTP clock info
1558 * @param ppb how much to adjust by, in parts-per-billion
1559 */
1560static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1561{
1562        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1563        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1564        u64 comp, delta;
1565        unsigned long flags;
1566        bool neg_adj = false;
1567
1568        if (ppb < 0) {
1569                neg_adj = true;
1570                ppb = -ppb;
1571        }
1572
1573        /* The hardware adds the clock compensation value to the
1574         * PTP clock on every coprocessor clock cycle, so we
1575         * compute the delta in terms of coprocessor clocks.
1576         */
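            /* Worked example (hypothetical rate): at a 1 GHz coprocessor
             * clock the COMP register holds 1.0 ns in 32.32 fixed point;
             * a request of 1000 ppb gives delta = (1000 << 32) / 1e9,
             * about 4295, so the clock gains ~1 us per second of
             * coprocessor time once applied.
             */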
1577        delta = (u64)ppb << 32;
1578        do_div(delta, oct->coproc_clock_rate);
1579
1580        spin_lock_irqsave(&lio->ptp_lock, flags);
1581        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1582        if (neg_adj)
1583                comp -= delta;
1584        else
1585                comp += delta;
1586        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1587        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1588
1589        return 0;
1590}
1591
1592/**
1593 * \brief Adjust ptp time
1594 * @param ptp PTP clock info
1595 * @param delta how much to adjust by, in nanosecs
1596 */
1597static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1598{
1599        unsigned long flags;
1600        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1601
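            /* Time steps accumulate in software: gettime() folds
             * ptp_adjust into the raw CLOCK_HI reading and settime()
             * clears it, so adjtime() never rewrites the hardware counter.
             */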
1602        spin_lock_irqsave(&lio->ptp_lock, flags);
1603        lio->ptp_adjust += delta;
1604        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1605
1606        return 0;
1607}
1608
1609/**
1610 * \brief Get hardware clock time, including any adjustment
1611 * @param ptp PTP clock info
1612 * @param ts timespec
1613 */
1614static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1615                                struct timespec64 *ts)
1616{
1617        u64 ns;
1618        unsigned long flags;
1619        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1620        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1621
1622        spin_lock_irqsave(&lio->ptp_lock, flags);
1623        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1624        ns += lio->ptp_adjust;
1625        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1626
1627        *ts = ns_to_timespec64(ns);
1628
1629        return 0;
1630}
1631
1632/**
1633 * \brief Set hardware clock time. Reset adjustment
1634 * @param ptp PTP clock info
1635 * @param ts timespec
1636 */
1637static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1638                                const struct timespec64 *ts)
1639{
1640        u64 ns;
1641        unsigned long flags;
1642        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1643        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1644
1645        ns = timespec64_to_ns(ts);
1646
1647        spin_lock_irqsave(&lio->ptp_lock, flags);
1648        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1649        lio->ptp_adjust = 0;
1650        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1651
1652        return 0;
1653}
1654
1655/**
1656 * \brief PTP ancillary feature enable callback (none supported)
1657 * @param ptp PTP clock info
1658 * @param rq request
1659 * @param on enable (1) or disable (0)
1660 */
1661static int
1662liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1663                    struct ptp_clock_request *rq __attribute__((unused)),
1664                    int on __attribute__((unused)))
1665{
1666        return -EOPNOTSUPP;
1667}
1668
1669/**
1670 * \brief Open PTP clock source
1671 * @param netdev network device
1672 */
1673static void oct_ptp_open(struct net_device *netdev)
1674{
1675        struct lio *lio = GET_LIO(netdev);
1676        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1677
1678        spin_lock_init(&lio->ptp_lock);
1679
1680        snprintf(lio->ptp_info.name, sizeof(lio->ptp_info.name), "%s", netdev->name);
1681        lio->ptp_info.owner = THIS_MODULE;
1682        lio->ptp_info.max_adj = 250000000;
1683        lio->ptp_info.n_alarm = 0;
1684        lio->ptp_info.n_ext_ts = 0;
1685        lio->ptp_info.n_per_out = 0;
1686        lio->ptp_info.pps = 0;
1687        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1688        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1689        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1690        lio->ptp_info.settime64 = liquidio_ptp_settime;
1691        lio->ptp_info.enable = liquidio_ptp_enable;
1692
1693        lio->ptp_adjust = 0;
1694
1695        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1696                                             &oct->pci_dev->dev);
1697
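            /* On registration failure the driver carries on without a
             * PHC; liquidio_stop() only unregisters a non-NULL clock.
             */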
1698        if (IS_ERR(lio->ptp_clock))
1699                lio->ptp_clock = NULL;
1700}
1701
1702/**
1703 * \brief Init PTP clock
1704 * @param oct octeon device
1705 */
1706static void liquidio_ptp_init(struct octeon_device *oct)
1707{
1708        u64 clock_comp, cfg;
1709
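            /* COMP holds 32.32 fixed-point nanoseconds added per
             * coprocessor cycle; e.g. a hypothetical 500 MHz clock seeds
             * it with (NSEC_PER_SEC << 32) / 5e8, i.e. 2.0 ns per cycle.
             */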
1710        clock_comp = (u64)NSEC_PER_SEC << 32;
1711        do_div(clock_comp, oct->coproc_clock_rate);
1712        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1713
1714        /* Enable */
1715        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1716        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1717}
1718
1719/**
1720 * \brief Load firmware to device
1721 * @param oct octeon device
1722 *
1723 * Maps device to firmware filename, requests firmware, and downloads it
1724 */
1725static int load_firmware(struct octeon_device *oct)
1726{
1727        int ret = 0;
1728        const struct firmware *fw;
1729        char fw_name[LIO_MAX_FW_FILENAME_LEN];
1730        char *tmp_fw_type;
1731
1732        if (fw_type_is_auto()) {
1733                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1734                strscpy(fw_type, tmp_fw_type, sizeof(fw_type));
1735        } else {
1736                tmp_fw_type = fw_type;
1737        }
1738
1739        snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1740                octeon_get_conf(oct)->card_name, tmp_fw_type,
1741                LIO_FW_NAME_SUFFIX);
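            /* e.g. for a CN23XX NIC this typically resolves to
             * "liquidio/lio_23xx_nic.bin" in the firmware search path
             */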
1742
1743        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1744        if (ret) {
1745                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1746                        fw_name);
1747                /* fw is left NULL by request_firmware() on failure */
1748                return ret;
1749        }
1750
1751        ret = octeon_download_firmware(oct, fw->data, fw->size);
1752
1753        release_firmware(fw);
1754
1755        return ret;
1756}
1757
1758/**
1759 * \brief Poll routine for checking transmit queue status
1760 * @param work work_struct data structure
1761 */
1762static void octnet_poll_check_txq_status(struct work_struct *work)
1763{
1764        struct cavium_wk *wk = (struct cavium_wk *)work;
1765        struct lio *lio = (struct lio *)wk->ctxptr;
1766
1767        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1768                return;
1769
1770        check_txq_status(lio);
1771        queue_delayed_work(lio->txq_status_wq.wq,
1772                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1773}
1774
1775/**
1776 * \brief Sets up the txq poll check
1777 * @param netdev network device
1778 */
1779static inline int setup_tx_poll_fn(struct net_device *netdev)
1780{
1781        struct lio *lio = GET_LIO(netdev);
1782        struct octeon_device *oct = lio->oct_dev;
1783
1784        lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1785                                                WQ_MEM_RECLAIM, 0);
1786        if (!lio->txq_status_wq.wq) {
1787                dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1788                return -ENOMEM;
1789        }
1790        INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1791                          octnet_poll_check_txq_status);
1792        lio->txq_status_wq.wk.ctxptr = lio;
1793        queue_delayed_work(lio->txq_status_wq.wq,
1794                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1795        return 0;
1796}
1797
1798static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1799{
1800        struct lio *lio = GET_LIO(netdev);
1801
1802        if (lio->txq_status_wq.wq) {
1803                cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1804                destroy_workqueue(lio->txq_status_wq.wq);
1805        }
1806}
1807
1808/**
1809 * \brief Net device open for LiquidIO
1810 * @param netdev network device
1811 */
1812static int liquidio_open(struct net_device *netdev)
1813{
1814        struct lio *lio = GET_LIO(netdev);
1815        struct octeon_device *oct = lio->oct_dev;
1816        struct octeon_device_priv *oct_priv =
1817                (struct octeon_device_priv *)oct->priv;
1818        struct napi_struct *napi, *n;
1819
1820        if (oct->props[lio->ifidx].napi_enabled == 0) {
1821                tasklet_disable(&oct_priv->droq_tasklet);
1822
1823                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1824                        napi_enable(napi);
1825
1826                oct->props[lio->ifidx].napi_enabled = 1;
1827
1828                if (OCTEON_CN23XX_PF(oct))
1829                        oct->droq[0]->ops.poll_mode = 1;
1830        }
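            /* While NAPI owns the receive queues the droq tasklet stays
             * disabled; liquidio_stop() re-enables it after NAPI is off.
             */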
1831
1832        if (oct->ptp_enable)
1833                oct_ptp_open(netdev);
1834
1835        ifstate_set(lio, LIO_IFSTATE_RUNNING);
1836
1837        /* A CN23XX PF in MSI-X mode is expected to service transmit
1838         * completions from its per-queue interrupts; every other
1839         * configuration needs the periodic txq status poll.
1840         */
1841        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
1842                if (setup_tx_poll_fn(netdev))
1843                        return -ENOMEM;
1844        }
1845
1846        netif_tx_start_all_queues(netdev);
1847
1848        /* Ready for link status updates */
1849        lio->intf_open = 1;
1850
1851        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1852
1853        /* tell Octeon to start forwarding packets to host */
1854        send_rx_ctrl_cmd(lio, 1);
1855
1856        /* start periodical statistics fetch */
1857        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1858        lio->stats_wk.ctxptr = lio;
1859        schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1860                                        (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1861
1862        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1863                 netdev->name);
1864
1865        return 0;
1866}
1867
1868/**
1869 * \brief Net device stop for LiquidIO
1870 * @param netdev network device
1871 */
1872static int liquidio_stop(struct net_device *netdev)
1873{
1874        struct lio *lio = GET_LIO(netdev);
1875        struct octeon_device *oct = lio->oct_dev;
1876        struct octeon_device_priv *oct_priv =
1877                (struct octeon_device_priv *)oct->priv;
1878        struct napi_struct *napi, *n;
1879
1880        ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1881
1882        /* Stop any link updates */
1883        lio->intf_open = 0;
1884
1885        stop_txqs(netdev);
1886
1887        /* Inform that netif carrier is down */
1888        netif_carrier_off(netdev);
1889        netif_tx_disable(netdev);
1890
1891        lio->linfo.link.s.link_up = 0;
1892        lio->link_changes++;
1893
1894        /* Tell Octeon that nic interface is down. */
1895        send_rx_ctrl_cmd(lio, 0);
1896
1897        /* Tear down the txq poll worker in the same cases it was set
1898         * up in liquidio_open(); cleanup_tx_poll_fn() checks that the
1899         * workqueue exists before touching it.
1900         */
1901        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
1902                cleanup_tx_poll_fn(netdev);
1903
1904        cancel_delayed_work_sync(&lio->stats_wk.work);
1905
1906        if (lio->ptp_clock) {
1907                ptp_clock_unregister(lio->ptp_clock);
1908                lio->ptp_clock = NULL;
1909        }
1910
1911        /* Wait for any pending Rx descriptors */
1912        if (lio_wait_for_clean_oq(oct))
1913                netif_info(lio, rx_err, lio->netdev,
1914                           "Proceeding with stop interface after partial RX desc processing\n");
1915
1916        if (oct->props[lio->ifidx].napi_enabled == 1) {
1917                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1918                        napi_disable(napi);
1919
1920                oct->props[lio->ifidx].napi_enabled = 0;
1921
1922                if (OCTEON_CN23XX_PF(oct))
1923                        oct->droq[0]->ops.poll_mode = 0;
1924
1925                tasklet_enable(&oct_priv->droq_tasklet);
1926        }
1927
1928        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1929
1930        return 0;
1931}
1932
1933/**
1934 * \brief Converts a mask based on net device flags
1935 * @param netdev network device
1936 *
1937 * This routine generates a octnet_ifflags mask from the net device flags
1938 * received from the OS.
1939 */
1940static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1941{
1942        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1943
1944        if (netdev->flags & IFF_PROMISC)
1945                f |= OCTNET_IFFLAG_PROMISC;
1946
1947        if (netdev->flags & IFF_ALLMULTI)
1948                f |= OCTNET_IFFLAG_ALLMULTI;
1949
1950        if (netdev->flags & IFF_MULTICAST) {
1951                f |= OCTNET_IFFLAG_MULTICAST;
1952
1953                /* Accept all multicast addresses if there are more than we
1954                 * can handle
1955                 */
1956                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1957                        f |= OCTNET_IFFLAG_ALLMULTI;
1958        }
1959
1960        if (netdev->flags & IFF_BROADCAST)
1961                f |= OCTNET_IFFLAG_BROADCAST;
1962
1963        return f;
1964}
1965
1966/**
1967 * \brief Net device set_multicast_list
1968 * @param netdev network device
1969 */
1970static void liquidio_set_mcast_list(struct net_device *netdev)
1971{
1972        struct lio *lio = GET_LIO(netdev);
1973        struct octeon_device *oct = lio->oct_dev;
1974        struct octnic_ctrl_pkt nctrl;
1975        struct netdev_hw_addr *ha;
1976        u64 *mc;
1977        int ret;
1978        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1979
1980        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1981
1982        /* Create a ctrl pkt command to be sent to core app. */
1983        nctrl.ncmd.u64 = 0;
1984        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1985        nctrl.ncmd.s.param1 = get_new_flags(netdev);
1986        nctrl.ncmd.s.param2 = mc_count;
1987        nctrl.ncmd.s.more = mc_count;
1988        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1989        nctrl.netpndev = (u64)netdev;
1990        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1991
1992        /* copy all the addresses into the udd */
1993        mc = &nctrl.udd[0];
1994        netdev_for_each_mc_addr(ha, netdev) {
1995                *mc = 0;
1996                memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1997                /* no need to swap bytes */
1998
1999                if (++mc > &nctrl.udd[mc_count])
2000                        break;
2001        }
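            /* Each udd word now holds one address: bytes 0-1 are zero and
             * the 6-byte MAC sits in bytes 2-7, the same network-order
             * layout used for hw_addr elsewhere in this file.
             */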
2002
2003        /* This callback can run in atomic context, so we must not
2004         * sleep waiting for the firmware's response.
2005         */
2006
2007        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2008        if (ret) {
2009                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2010                        ret);
2011        }
2012}
2013
2014/**
2015 * \brief Net device set_mac_address
2016 * @param netdev network device
2017 */
2018static int liquidio_set_mac(struct net_device *netdev, void *p)
2019{
2020        int ret = 0;
2021        struct lio *lio = GET_LIO(netdev);
2022        struct octeon_device *oct = lio->oct_dev;
2023        struct sockaddr *addr = (struct sockaddr *)p;
2024        struct octnic_ctrl_pkt nctrl;
2025
2026        if (!is_valid_ether_addr(addr->sa_data))
2027                return -EADDRNOTAVAIL;
2028
2029        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2030
2031        nctrl.ncmd.u64 = 0;
2032        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2033        nctrl.ncmd.s.param1 = 0;
2034        nctrl.ncmd.s.more = 1;
2035        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2036        nctrl.netpndev = (u64)netdev;
2037
2038        nctrl.udd[0] = 0;
2039        /* The MAC Address is presented in network byte order. */
2040        memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2041
2042        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2043        if (ret < 0) {
2044                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2045                return -EIO;
2046        }
2047
2048        if (nctrl.sc_status) {
2049                dev_err(&oct->pci_dev->dev,
2050                        "%s: MAC Address change failed. sc return=%x\n",
2051                         __func__, nctrl.sc_status);
2052                return -EIO;
2053        }
2054
2055        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2056        memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2057
2058        return 0;
2059}
2060
2061static void
2062liquidio_get_stats64(struct net_device *netdev,
2063                     struct rtnl_link_stats64 *lstats)
2064{
2065        struct lio *lio = GET_LIO(netdev);
2066        struct octeon_device *oct;
2067        u64 pkts = 0, drop = 0, bytes = 0;
2068        struct oct_droq_stats *oq_stats;
2069        struct oct_iq_stats *iq_stats;
2070        int i, iq_no, oq_no;
2071
2072        oct = lio->oct_dev;
2073
2074        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2075                return;
2076
2077        for (i = 0; i < oct->num_iqs; i++) {
2078                iq_no = lio->linfo.txpciq[i].s.q_no;
2079                iq_stats = &oct->instr_queue[iq_no]->stats;
2080                pkts += iq_stats->tx_done;
2081                drop += iq_stats->tx_dropped;
2082                bytes += iq_stats->tx_tot_bytes;
2083        }
2084
2085        lstats->tx_packets = pkts;
2086        lstats->tx_bytes = bytes;
2087        lstats->tx_dropped = drop;
2088
2089        pkts = 0;
2090        drop = 0;
2091        bytes = 0;
2092
2093        for (i = 0; i < oct->num_oqs; i++) {
2094                oq_no = lio->linfo.rxpciq[i].s.q_no;
2095                oq_stats = &oct->droq[oq_no]->stats;
2096                pkts += oq_stats->rx_pkts_received;
2097                drop += (oq_stats->rx_dropped +
2098                         oq_stats->dropped_nodispatch +
2099                         oq_stats->dropped_toomany +
2100                         oq_stats->dropped_nomem);
2101                bytes += oq_stats->rx_bytes_received;
2102        }
2103
2104        lstats->rx_bytes = bytes;
2105        lstats->rx_packets = pkts;
2106        lstats->rx_dropped = drop;
2107
2108        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2109        lstats->collisions = oct->link_stats.fromhost.total_collisions;
2110
2111        /* detailed rx_errors: */
2112        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2113        /* received packet with CRC error */
2114        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2115        /* received frame alignment error */
2116        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2117        /* receiver FIFO overrun */
2118        lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2119
2120        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2121                lstats->rx_frame_errors + lstats->rx_fifo_errors;
2122
2123        /* detailed tx_errors */
2124        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2125        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2126        lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2127
2128        lstats->tx_errors = lstats->tx_aborted_errors +
2129                lstats->tx_carrier_errors +
2130                lstats->tx_fifo_errors;
2131}
2132
2133/**
2134 * \brief Handler for SIOCSHWTSTAMP ioctl
2135 * @param netdev network device
2136 * @param ifr interface request
2137 * @param cmd command
2138 */
2139static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2140{
2141        struct hwtstamp_config conf;
2142        struct lio *lio = GET_LIO(netdev);
2143
2144        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2145                return -EFAULT;
2146
2147        if (conf.flags)
2148                return -EINVAL;
2149
2150        switch (conf.tx_type) {
2151        case HWTSTAMP_TX_ON:
2152        case HWTSTAMP_TX_OFF:
2153                break;
2154        default:
2155                return -ERANGE;
2156        }
2157
2158        switch (conf.rx_filter) {
2159        case HWTSTAMP_FILTER_NONE:
2160                break;
2161        case HWTSTAMP_FILTER_ALL:
2162        case HWTSTAMP_FILTER_SOME:
2163        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2164        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2165        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2166        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2167        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2168        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2169        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2170        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2171        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2172        case HWTSTAMP_FILTER_PTP_V2_EVENT:
2173        case HWTSTAMP_FILTER_PTP_V2_SYNC:
2174        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2175        case HWTSTAMP_FILTER_NTP_ALL:
2176                conf.rx_filter = HWTSTAMP_FILTER_ALL;
2177                break;
2178        default:
2179                return -ERANGE;
2180        }
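            /* The NIC timestamps either every received packet or none, so
             * any accepted PTP filter was coerced to HWTSTAMP_FILTER_ALL
             * above and is reported back to the caller as such.
             */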
2181
2182        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2183                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2184        else
2185                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2186
2187
2188        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2189}
2190
2191/**
2192 * \brief ioctl handler
2193 * @param netdev network device
2194 * @param ifr interface request
2195 * @param cmd command
2196 */
2197static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2198{
2199        struct lio *lio = GET_LIO(netdev);
2200
2201        switch (cmd) {
2202        case SIOCSHWTSTAMP:
2203                if (lio->oct_dev->ptp_enable)
2204                        return hwtstamp_ioctl(netdev, ifr);
2205                /* fall through */
2206        default:
2207                return -EOPNOTSUPP;
2208        }
2209}
2210
2211/**
2212 * \brief handle a Tx timestamp response
2213 * @param oct octeon device
     * @param status response status
2214 * @param buf pointer to skb
2215 */
2216static void handle_timestamp(struct octeon_device *oct,
2217                             u32 status,
2218                             void *buf)
2219{
2220        struct octnet_buf_free_info *finfo;
2221        struct octeon_soft_command *sc;
2222        struct oct_timestamp_resp *resp;
2223        struct lio *lio;
2224        struct sk_buff *skb = (struct sk_buff *)buf;
2225
2226        finfo = (struct octnet_buf_free_info *)skb->cb;
2227        lio = finfo->lio;
2228        sc = finfo->sc;
2229        oct = lio->oct_dev;
2230        resp = (struct oct_timestamp_resp *)sc->virtrptr;
2231
2232        if (status != OCTEON_REQUEST_DONE) {
2233                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2234                        CVM_CAST64(status));
2235                resp->timestamp = 0;
2236        }
2237
2238        octeon_swap_8B_data(&resp->timestamp, 1);
2239
2240        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2241                struct skb_shared_hwtstamps ts;
2242                u64 ns = resp->timestamp;
2243
2244                netif_info(lio, tx_done, lio->netdev,
2245                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2246                           skb, (unsigned long long)ns);
2247                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2248                skb_tstamp_tx(skb, &ts);
2249        }
2250
2251        octeon_free_soft_command(oct, sc);
2252        tx_buffer_free(skb);
2253}
2254
2255/** \brief Send a data packet that will be timestamped
2256 * @param oct octeon device
2257 * @param ndata pointer to network data
2258 * @param finfo pointer to private network data
     * @param xmit_more the stack has more packets queued, defer the doorbell
2259 */
2260static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2261                                         struct octnic_data_pkt *ndata,
2262                                         struct octnet_buf_free_info *finfo,
2263                                         int xmit_more)
2264{
2265        int retval;
2266        struct octeon_soft_command *sc;
2267        struct lio *lio;
2268        int ring_doorbell;
2269        u32 len;
2270
2271        lio = finfo->lio;
2272
2273        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2274                                            sizeof(struct oct_timestamp_resp));
2275        finfo->sc = sc;
2276
2277        if (!sc) {
2278                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2279                return IQ_SEND_FAILED;
2280        }
2281
2282        if (ndata->reqtype == REQTYPE_NORESP_NET)
2283                ndata->reqtype = REQTYPE_RESP_NET;
2284        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2285                ndata->reqtype = REQTYPE_RESP_NET_SG;
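            /* A timestamped packet needs a firmware response carrying the
             * TX timestamp, so the NORESP request types are promoted to
             * their RESP variants before the command is queued.
             */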
2286
2287        sc->callback = handle_timestamp;
2288        sc->callback_arg = finfo->skb;
2289        sc->iq_no = ndata->q_no;
2290
2291        if (OCTEON_CN23XX_PF(oct))
2292                len = (u32)((struct octeon_instr_ih3 *)
2293                            (&sc->cmd.cmd3.ih3))->dlengsz;
2294        else
2295                len = (u32)((struct octeon_instr_ih2 *)
2296                            (&sc->cmd.cmd2.ih2))->dlengsz;
2297
2298        ring_doorbell = !xmit_more;
2299
2300        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2301                                     sc, len, ndata->reqtype);
2302
2303        if (retval == IQ_SEND_FAILED) {
2304                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2305                        retval);
2306                octeon_free_soft_command(oct, sc);
2307        } else {
2308                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2309        }
2310
2311        return retval;
2312}
2313
2314/** \brief Transmit network packets to the Octeon interface
2315 * @param skbuff   skbuff struct to be passed to network layer.
2316 * @param netdev    pointer to network device
2317 * @returns whether the packet was transmitted to the device okay or not
2318 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2319 */
2320static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2321{
2322        struct lio *lio;
2323        struct octnet_buf_free_info *finfo;
2324        union octnic_cmd_setup cmdsetup;
2325        struct octnic_data_pkt ndata;
2326        struct octeon_device *oct;
2327        struct oct_iq_stats *stats;
2328        struct octeon_instr_irh *irh;
2329        union tx_info *tx_info;
2330        int status = 0;
2331        int q_idx = 0, iq_no = 0;
2332        int j, xmit_more = 0;
2333        u64 dptr = 0;
2334        u32 tag = 0;
2335
2336        lio = GET_LIO(netdev);
2337        oct = lio->oct_dev;
2338
2339        q_idx = skb_iq(oct, skb);
2340        tag = q_idx;
2341        iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2342
2343        stats = &oct->instr_queue[iq_no]->stats;
2344
2345        /* Check for all conditions in which the current packet cannot be
2346         * transmitted.
2347         */
2348        if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2349            (!lio->linfo.link.s.link_up) ||
2350            (skb->len <= 0)) {
2351                netif_info(lio, tx_err, lio->netdev,
2352                   "Transmit failed, link_status: %d\n",
2353                           lio->linfo.link.s.link_up);
2354                goto lio_xmit_failed;
2355        }
2356
2357        /* Use space in skb->cb to store info used to unmap and
2358         * free the buffers.
2359         */
2360        finfo = (struct octnet_buf_free_info *)skb->cb;
2361        finfo->lio = lio;
2362        finfo->skb = skb;
2363        finfo->sc = NULL;
2364
2365        /* Prepare the attributes for the data to be passed to OSI. */
2366        memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2367
2368        ndata.buf = (void *)finfo;
2369
2370        ndata.q_no = iq_no;
2371
2372        if (octnet_iq_is_full(oct, ndata.q_no)) {
2373                /* defer sending if queue is full */
2374                netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2375                           ndata.q_no);
2376                stats->tx_iq_busy++;
2377                return NETDEV_TX_BUSY;
2378        }
2379
2383
2384        ndata.datasize = skb->len;
2385
2386        cmdsetup.u64 = 0;
2387        cmdsetup.s.iq_no = iq_no;
2388
2389        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2390                if (skb->encapsulation) {
2391                        cmdsetup.s.tnl_csum = 1;
2392                        stats->tx_vxlan++;
2393                } else {
2394                        cmdsetup.s.transport_csum = 1;
2395                }
2396        }
2397        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2398                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2399                cmdsetup.s.timestamp = 1;
2400        }
2401
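            /* Linear skbs go out with a single DMA mapping; fragmented
             * skbs instead borrow a gather-list entry from the per-queue
             * free list and map each fragment into it.
             */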
2402        if (skb_shinfo(skb)->nr_frags == 0) {
2403                cmdsetup.s.u.datasize = skb->len;
2404                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2405
2406                /* Map the linear skb data for transmit DMA */
2407                dptr = dma_map_single(&oct->pci_dev->dev,
2408                                      skb->data,
2409                                      skb->len,
2410                                      DMA_TO_DEVICE);
2411                if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2412                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2413                                __func__);
2414                        stats->tx_dmamap_fail++;
2415                        return NETDEV_TX_BUSY;
2416                }
2417
2418                if (OCTEON_CN23XX_PF(oct))
2419                        ndata.cmd.cmd3.dptr = dptr;
2420                else
2421                        ndata.cmd.cmd2.dptr = dptr;
2422                finfo->dptr = dptr;
2423                ndata.reqtype = REQTYPE_NORESP_NET;
2424
2425        } else {
2426                int i, frags;
2427                struct skb_frag_struct *frag;
2428                struct octnic_gather *g;
2429
2430                spin_lock(&lio->glist_lock[q_idx]);
2431                g = (struct octnic_gather *)
2432                        lio_list_delete_head(&lio->glist[q_idx]);
2433                spin_unlock(&lio->glist_lock[q_idx]);
2434
2435                if (!g) {
2436                        netif_info(lio, tx_err, lio->netdev,
2437                                   "Transmit scatter gather: glist null!\n");
2438                        goto lio_xmit_failed;
2439                }
2440
2441                cmdsetup.s.gather = 1;
2442                cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2443                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2444
2445                memset(g->sg, 0, g->sg_size);
2446
2447                g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2448                                                 skb->data,
2449                                                 (skb->len - skb->data_len),
2450                                                 DMA_TO_DEVICE);
2451                if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2452                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2453                                __func__);
2454                        stats->tx_dmamap_fail++;
                            /* put the gather entry back on the free list
                             * before asking the stack to retry this skb,
                             * otherwise the per-queue pool slowly leaks
                             */
                            spin_lock(&lio->glist_lock[q_idx]);
                            list_add_tail(&g->list, &lio->glist[q_idx]);
                            spin_unlock(&lio->glist_lock[q_idx]);
2455                        return NETDEV_TX_BUSY;
2456                }
2457                add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2458
2459                frags = skb_shinfo(skb)->nr_frags;
2460                i = 1;
2461                while (frags--) {
2462                        frag = &skb_shinfo(skb)->frags[i - 1];
2463
2464                        g->sg[(i >> 2)].ptr[(i & 3)] =
2465                                dma_map_page(&oct->pci_dev->dev,
2466                                             frag->page.p,
2467                                             frag->page_offset,
2468                                             frag->size,
2469                                             DMA_TO_DEVICE);
2470
2471                        if (dma_mapping_error(&oct->pci_dev->dev,
2472                                              g->sg[i >> 2].ptr[i & 3])) {
2473                                dma_unmap_single(&oct->pci_dev->dev,
2474                                                 g->sg[0].ptr[0],
2475                                                 skb->len - skb->data_len,
2476                                                 DMA_TO_DEVICE);
2477                                for (j = 1; j < i; j++) {
2478                                        frag = &skb_shinfo(skb)->frags[j - 1];
2479                                        dma_unmap_page(&oct->pci_dev->dev,
2480                                                       g->sg[j >> 2].ptr[j & 3],
2481                                                       frag->size,
2482                                                       DMA_TO_DEVICE);
2483                                }
2484                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2485                                        __func__);
                                    stats->tx_dmamap_fail++;
                                    /* every mapping is undone; also return
                                     * the gather entry to the free list
                                     */
                                    spin_lock(&lio->glist_lock[q_idx]);
                                    list_add_tail(&g->list, &lio->glist[q_idx]);
                                    spin_unlock(&lio->glist_lock[q_idx]);
2486                                return NETDEV_TX_BUSY;
2487                        }
2488
2489                        add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2490                        i++;
2491                }
2492
2493                dptr = g->sg_dma_ptr;
2494
2495                if (OCTEON_CN23XX_PF(oct))
2496                        ndata.cmd.cmd3.dptr = dptr;
2497                else
2498                        ndata.cmd.cmd2.dptr = dptr;
2499                finfo->dptr = dptr;
2500                finfo->g = g;
2501
2502                ndata.reqtype = REQTYPE_NORESP_NET_SG;
2503        }
2504
2505        if (OCTEON_CN23XX_PF(oct)) {
2506                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2507                tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2508        } else {
2509                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2510                tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2511        }
2512
2513        if (skb_shinfo(skb)->gso_size) {
2514                tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2515                tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2516                stats->tx_gso++;
2517        }
2518
2519        /* HW insert VLAN tag */
2520        if (skb_vlan_tag_present(skb)) {
2521                irh->priority = skb_vlan_tag_get(skb) >> 13;
2522                irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2523        }
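            /* The 16-bit VLAN TCI splits into PCP (bits 15:13), DEI
             * (bit 12) and VID (bits 11:0); only PCP and VID are passed
             * to the firmware here.
             */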
2524
2525        xmit_more = netdev_xmit_more();
2526
2527        if (unlikely(cmdsetup.s.timestamp))
2528                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2529        else
2530                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2531        if (status == IQ_SEND_FAILED)
2532                goto lio_xmit_failed;
2533
2534        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2535
2536        if (status == IQ_SEND_STOP)
2537                netif_stop_subqueue(netdev, q_idx);
2538
2539        netif_trans_update(netdev);
2540
2541        if (tx_info->s.gso_segs)
2542                stats->tx_done += tx_info->s.gso_segs;
2543        else
2544                stats->tx_done++;
2545        stats->tx_tot_bytes += ndata.datasize;
2546
2547        return NETDEV_TX_OK;
2548
2549lio_xmit_failed:
2550        stats->tx_dropped++;
2551        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2552                   iq_no, stats->tx_dropped);
2553        if (dptr)
2554                dma_unmap_single(&oct->pci_dev->dev, dptr,
2555                                 ndata.datasize, DMA_TO_DEVICE);
2556
2557        octeon_ring_doorbell_locked(oct, iq_no);
2558
2559        tx_buffer_free(skb);
2560        return NETDEV_TX_OK;
2561}
2562
2563/** \brief Network device Tx timeout
2564 * @param netdev    pointer to network device
2565 */
2566static void liquidio_tx_timeout(struct net_device *netdev)
2567{
2568        struct lio *lio;
2569
2570        lio = GET_LIO(netdev);
2571
2572        netif_info(lio, tx_err, lio->netdev,
2573                   "Transmit timeout, tx_dropped: %lu, waking up queues now\n",
2574                   netdev->stats.tx_dropped);
2575        netif_trans_update(netdev);
2576        wake_txqs(netdev);
2577}
2578
2579static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2580                                    __be16 proto __attribute__((unused)),
2581                                    u16 vid)
2582{
2583        struct lio *lio = GET_LIO(netdev);
2584        struct octeon_device *oct = lio->oct_dev;
2585        struct octnic_ctrl_pkt nctrl;
2586        int ret = 0;
2587
2588        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2589
2590        nctrl.ncmd.u64 = 0;
2591        nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2592        nctrl.ncmd.s.param1 = vid;
2593        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2594        nctrl.netpndev = (u64)netdev;
2595        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2596
2597        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2598        if (ret) {
2599                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2600                        ret);
2601                if (ret > 0)
2602                        ret = -EIO;
2603        }
2604
2605        return ret;
2606}
2607
2608static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2609                                     __be16 proto __attribute__((unused)),
2610                                     u16 vid)
2611{
2612        struct lio *lio = GET_LIO(netdev);
2613        struct octeon_device *oct = lio->oct_dev;
2614        struct octnic_ctrl_pkt nctrl;
2615        int ret = 0;
2616
2617        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2618
2619        nctrl.ncmd.u64 = 0;
2620        nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2621        nctrl.ncmd.s.param1 = vid;
2622        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2623        nctrl.netpndev = (u64)netdev;
2624        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2625
2626        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2627        if (ret) {
2628                dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2629                        ret);
2630                if (ret > 0)
2631                        ret = -EIO;
2632        }
2633        return ret;
2634}
2635
2636/** Sending command to enable/disable RX checksum offload
2637 * @param netdev                pointer to network device
2638 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2639 * @param rx_cmd_bit            OCTNET_CMD_RXCSUM_ENABLE/
2640 *                              OCTNET_CMD_RXCSUM_DISABLE
2641 * @returns                     SUCCESS or FAILURE
2642 */
2643static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2644                                       u8 rx_cmd)
2645{
2646        struct lio *lio = GET_LIO(netdev);
2647        struct octeon_device *oct = lio->oct_dev;
2648        struct octnic_ctrl_pkt nctrl;
2649        int ret = 0;
2650
2651        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2652
2653        nctrl.ncmd.u64 = 0;
2654        nctrl.ncmd.s.cmd = command;
2655        nctrl.ncmd.s.param1 = rx_cmd;
2656        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2657        nctrl.netpndev = (u64)netdev;
2658        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2659
2660        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2661        if (ret) {
2662                dev_err(&oct->pci_dev->dev,
2663                        "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2664                        ret);
2665                if (ret > 0)
2666                        ret = -EIO;
2667        }
2668        return ret;
2669}
2670
2671/** Sending command to add/delete VxLAN UDP port to firmware
2672 * @param netdev                pointer to network device
2673 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2674 * @param vxlan_port            VxLAN port to be added or deleted
2675 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2676 *                              OCTNET_CMD_VXLAN_PORT_DEL
2677 * @returns                     SUCCESS or FAILURE
2678 */
2679static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2680                                       u16 vxlan_port, u8 vxlan_cmd_bit)
2681{
2682        struct lio *lio = GET_LIO(netdev);
2683        struct octeon_device *oct = lio->oct_dev;
2684        struct octnic_ctrl_pkt nctrl;
2685        int ret = 0;
2686
2687        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2688
2689        nctrl.ncmd.u64 = 0;
2690        nctrl.ncmd.s.cmd = command;
2691        nctrl.ncmd.s.more = vxlan_cmd_bit;
2692        nctrl.ncmd.s.param1 = vxlan_port;
2693        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2694        nctrl.netpndev = (u64)netdev;
2695        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2696
2697        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2698        if (ret) {
2699                dev_err(&oct->pci_dev->dev,
2700                        "VxLAN port add/delete failed in core (ret:0x%x)\n",
2701                        ret);
2702                if (ret > 0)
2703                        ret = -EIO;
2704        }
2705        return ret;
2706}
2707
2708/** \brief Net device fix features
2709 * @param netdev  pointer to network device
2710 * @param request features requested
2711 * @returns updated features list
2712 */
2713static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2714                                               netdev_features_t request)
2715{
2716        struct lio *lio = netdev_priv(netdev);
2717
2718        if ((request & NETIF_F_RXCSUM) &&
2719            !(lio->dev_capability & NETIF_F_RXCSUM))
2720                request &= ~NETIF_F_RXCSUM;
2721
2722        if ((request & NETIF_F_HW_CSUM) &&
2723            !(lio->dev_capability & NETIF_F_HW_CSUM))
2724                request &= ~NETIF_F_HW_CSUM;
2725
2726        if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2727                request &= ~NETIF_F_TSO;
2728
2729        if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2730                request &= ~NETIF_F_TSO6;
2731
2732        if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2733                request &= ~NETIF_F_LRO;
2734
2735        /*Disable LRO if RXCSUM is off */
2736        if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2737            (lio->dev_capability & NETIF_F_LRO))
2738                request &= ~NETIF_F_LRO;
2739
2740        if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2741            !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2742                request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2743
2744        return request;
2745}
2746
2747/** \brief Net device set features
2748 * @param netdev  pointer to network device
2749 * @param features features to enable/disable
2750 */
2751static int liquidio_set_features(struct net_device *netdev,
2752                                 netdev_features_t features)
2753{
2754        struct lio *lio = netdev_priv(netdev);
2755
2756        if ((features & NETIF_F_LRO) &&
2757            (lio->dev_capability & NETIF_F_LRO) &&
2758            !(netdev->features & NETIF_F_LRO))
2759                liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2760                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2761        else if (!(features & NETIF_F_LRO) &&
2762                 (lio->dev_capability & NETIF_F_LRO) &&
2763                 (netdev->features & NETIF_F_LRO))
2764                liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2765                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2766
2767        /* Sending command to firmware to enable/disable RX checksum
2768         * offload settings using ethtool
2769         */
2770        if (!(netdev->features & NETIF_F_RXCSUM) &&
2771            (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2772            (features & NETIF_F_RXCSUM))
2773                liquidio_set_rxcsum_command(netdev,
2774                                            OCTNET_CMD_TNL_RX_CSUM_CTL,
2775                                            OCTNET_CMD_RXCSUM_ENABLE);
2776        else if ((netdev->features & NETIF_F_RXCSUM) &&
2777                 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2778                 !(features & NETIF_F_RXCSUM))
2779                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2780                                            OCTNET_CMD_RXCSUM_DISABLE);
2781
2782        if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2783            (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2784            !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2785                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2786                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
2787        else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2788                 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2789                 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2790                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2791                                     OCTNET_CMD_VLAN_FILTER_DISABLE);
2792
2793        return 0;
2794}
2795
2796static void liquidio_add_vxlan_port(struct net_device *netdev,
2797                                    struct udp_tunnel_info *ti)
2798{
2799        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2800                return;
2801
2802        liquidio_vxlan_port_command(netdev,
2803                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
2804                                    ntohs(ti->port),
2805                                    OCTNET_CMD_VXLAN_PORT_ADD);
2806}
2807
2808static void liquidio_del_vxlan_port(struct net_device *netdev,
2809                                    struct udp_tunnel_info *ti)
2810{
2811        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2812                return;
2813
2814        liquidio_vxlan_port_command(netdev,
2815                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
2816                                    ntohs(ti->port),
2817                                    OCTNET_CMD_VXLAN_PORT_DEL);
2818}
2819
2820static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2821                                 u8 *mac, bool is_admin_assigned)
2822{
2823        struct lio *lio = GET_LIO(netdev);
2824        struct octeon_device *oct = lio->oct_dev;
2825        struct octnic_ctrl_pkt nctrl;
2826        int ret = 0;
2827
2828        if (!is_valid_ether_addr(mac))
2829                return -EINVAL;
2830
2831        if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2832                return -EINVAL;
2833
2834        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2835
2836        nctrl.ncmd.u64 = 0;
2837        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2838        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2839        nctrl.ncmd.s.param1 = vfidx + 1;
2840        nctrl.ncmd.s.more = 1;
2841        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2842        nctrl.netpndev = (u64)netdev;
2843        if (is_admin_assigned) {
2844                nctrl.ncmd.s.param2 = true;
2845                nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2846        }
2847
2848        nctrl.udd[0] = 0;
2849        /* The MAC Address is presented in network byte order. */
2850        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2851
2852        oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2853
2854        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2855        if (ret > 0)
2856                ret = -EIO;
2857
2858        return ret;
2859}
2860
2861static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2862{
2863        struct lio *lio = GET_LIO(netdev);
2864        struct octeon_device *oct = lio->oct_dev;
2865        int retval;
2866
2867        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2868                return -EINVAL;
2869
2870        retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2871        if (!retval)
2872                cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2873
2874        return retval;
2875}
2876
2877static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2878                                    bool enable)
2879{
2880        struct lio *lio = GET_LIO(netdev);
2881        struct octeon_device *oct = lio->oct_dev;
2882        struct octnic_ctrl_pkt nctrl;
2883        int retval;
2884
2885        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2886                netif_info(lio, drv, lio->netdev,
2887                           "firmware does not support spoofchk\n");
2888                return -EOPNOTSUPP;
2889        }
2890
2891        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2892                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2893                return -EINVAL;
2894        }
2895
2896        if (enable) {
2897                if (oct->sriov_info.vf_spoofchk[vfidx])
2898                        return 0;
2899        } else {
2900                /* Clear */
2901                if (!oct->sriov_info.vf_spoofchk[vfidx])
2902                        return 0;
2903        }
2904
2905        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2906        nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2907        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2908        nctrl.ncmd.s.param1 =
2909                vfidx + 1; /* vfidx is 0 based,
2910                            * but vf_num (param1) is 1 based
2911                            */
2912        nctrl.ncmd.s.param2 = enable;
2913        nctrl.ncmd.s.more = 0;
2914        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2915        nctrl.cb_fn = NULL;
2916
2917        retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2918
2919        if (retval) {
2920                netif_info(lio, drv, lio->netdev,
2921                           "Failed to set VF %d spoofchk %s\n", vfidx,
2922                           enable ? "on" : "off");
2923                return -1;
2924        }
2925
2926        oct->sriov_info.vf_spoofchk[vfidx] = enable;
2927        netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2928                   enable ? "on" : "off");
2929
2930        return 0;
2931}
2932
2933static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2934                                u16 vlan, u8 qos, __be16 vlan_proto)
2935{
2936        struct lio *lio = GET_LIO(netdev);
2937        struct octeon_device *oct = lio->oct_dev;
2938        struct octnic_ctrl_pkt nctrl;
2939        u16 vlantci;
2940        int ret = 0;
2941
2942        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2943                return -EINVAL;
2944
2945        if (vlan_proto != htons(ETH_P_8021Q))
2946                return -EPROTONOSUPPORT;
2947
2948        if (vlan >= VLAN_N_VID || qos > 7)
2949                return -EINVAL;
2950
2951        if (vlan)
2952                vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2953        else
2954                vlantci = 0;
2955
2956        if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2957                return 0;
2958
2959        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2960
2961        if (vlan)
2962                nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2963        else
2964                nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2965
2966        nctrl.ncmd.s.param1 = vlantci;
2967        nctrl.ncmd.s.param2 =
2968            vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2969        nctrl.ncmd.s.more = 0;
2970        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2971        nctrl.cb_fn = NULL;
2972
2973        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2974        if (ret) {
2975                if (ret > 0)
2976                        ret = -EIO;
2977                return ret;
2978        }
2979
2980        oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2981
2982        return ret;
2983}
2984
2985static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2986                                  struct ifla_vf_info *ivi)
2987{
2988        struct lio *lio = GET_LIO(netdev);
2989        struct octeon_device *oct = lio->oct_dev;
2990        u8 *macaddr;
2991
2992        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2993                return -EINVAL;
2994
2995        memset(ivi, 0, sizeof(struct ifla_vf_info));
2996
2997        ivi->vf = vfidx;
2998        macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2999        ether_addr_copy(&ivi->mac[0], macaddr);
3000        ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3001        ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3002        if (oct->sriov_info.trusted_vf.active &&
3003            oct->sriov_info.trusted_vf.id == vfidx)
3004                ivi->trusted = true;
3005        else
3006                ivi->trusted = false;
3007        ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3008        ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3009        ivi->max_tx_rate = lio->linfo.link.s.speed;
3010        ivi->min_tx_rate = 0;
3011
3012        return 0;
3013}
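
/* The fields filled in above back the per-VF lines that iproute2 prints for
 * the PF, e.g. (illustrative) "ip link show <pf-netdev>": each VF's MAC,
 * vlan/qos, spoof checking, trust and link-state settings.
 */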
3014
3015static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3016{
3017        struct octeon_device *oct = lio->oct_dev;
3018        struct octeon_soft_command *sc;
3019        int retval;
3020
3021        sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3022        if (!sc)
3023                return -ENOMEM;
3024
3025        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3026
3027        /* vfidx is 0 based, but vf_num (param1) is 1 based */
3028        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3029                                    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3030                                    trusted);
3031
3032        init_completion(&sc->complete);
3033        sc->sc_status = OCTEON_REQUEST_PENDING;
3034
3035        retval = octeon_send_soft_command(oct, sc);
3036        if (retval == IQ_SEND_FAILED) {
3037                octeon_free_soft_command(oct, sc);
3038                retval = -1;
3039        } else {
3040                /* Wait for response or timeout */
3041                retval = wait_for_sc_completion_timeout(oct, sc, 0);
3042                if (retval)
3043                        return retval;
3044
3045                WRITE_ONCE(sc->caller_is_done, true);
3046        }
3047
3048        return retval;
3049}
3050
3051static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3052                                 bool setting)
3053{
3054        struct lio *lio = GET_LIO(netdev);
3055        struct octeon_device *oct = lio->oct_dev;
3056
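        /* Note: this is a plain lexicographic string compare, which is only
         * reliable while every version component stays single-digit (it
         * would, for instance, order "1.10.0" before "1.7.1").
         */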
3057        if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3058                /* trusted vf is not supported by firmware older than 1.7.1 */
3059                return -EOPNOTSUPP;
3060        }
3061
3062        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3063                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3064                return -EINVAL;
3065        }
3066
3067        if (setting) {
3068                /* Set */
3069
3070                if (oct->sriov_info.trusted_vf.active &&
3071                    oct->sriov_info.trusted_vf.id == vfidx)
3072                        return 0;
3073
3074                if (oct->sriov_info.trusted_vf.active) {
3075                        netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3076                        return -EPERM;
3077                }
3078        } else {
3079                /* Clear */
3080
3081                if (!oct->sriov_info.trusted_vf.active)
3082                        return 0;
3083        }
3084
3085        if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3086                if (setting) {
3087                        oct->sriov_info.trusted_vf.id = vfidx;
3088                        oct->sriov_info.trusted_vf.active = true;
3089                } else {
3090                        oct->sriov_info.trusted_vf.active = false;
3091                }
3092
3093                netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3094                           setting ? "" : "not ");
3095        } else {
3096                netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3097                return -1;
3098        }
3099
3100        return 0;
3101}
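
/* Illustrative usage: trust is toggled from user space with iproute2, e.g.
 * "ip link set <pf-netdev> vf 0 trust on"; as enforced above, only one VF
 * per PF may be trusted at a time.
 */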
3102
3103static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3104                                      int linkstate)
3105{
3106        struct lio *lio = GET_LIO(netdev);
3107        struct octeon_device *oct = lio->oct_dev;
3108        struct octnic_ctrl_pkt nctrl;
3109        int ret = 0;
3110
3111        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3112                return -EINVAL;
3113
3114        if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3115                return 0;
3116
3117        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3118        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3119        nctrl.ncmd.s.param1 =
3120            vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3121        nctrl.ncmd.s.param2 = linkstate;
3122        nctrl.ncmd.s.more = 0;
3123        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3124        nctrl.cb_fn = NULL;
3125
3126        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3127
3128        if (!ret)
3129                oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3130        else if (ret > 0)
3131                ret = -EIO;
3132
3133        return ret;
3134}
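
/* Illustrative usage: "ip link set <pf-netdev> vf 0 state enable" (or
 * "disable"/"auto") lands here; the new state is pushed to firmware before
 * being cached in sriov_info.
 */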
3135
3136static int
3137liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3138{
3139        struct lio_devlink_priv *priv;
3140        struct octeon_device *oct;
3141
3142        priv = devlink_priv(devlink);
3143        oct = priv->oct;
3144
3145        *mode = oct->eswitch_mode;
3146
3147        return 0;
3148}
3149
3150static int
3151liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3152                          struct netlink_ext_ack *extack)
3153{
3154        struct lio_devlink_priv *priv;
3155        struct octeon_device *oct;
3156        int ret = 0;
3157
3158        priv = devlink_priv(devlink);
3159        oct = priv->oct;
3160
3161        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3162                return -EINVAL;
3163
3164        if (oct->eswitch_mode == mode)
3165                return 0;
3166
3167        switch (mode) {
3168        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3169                oct->eswitch_mode = mode;
3170                ret = lio_vf_rep_create(oct);
3171                break;
3172
3173        case DEVLINK_ESWITCH_MODE_LEGACY:
3174                lio_vf_rep_destroy(oct);
3175                oct->eswitch_mode = mode;
3176                break;
3177
3178        default:
3179                ret = -EINVAL;
3180        }
3181
3182        return ret;
3183}
3184
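/* These callbacks implement the devlink eswitch-mode API; from user space
 * (illustrative device address):
 *
 *   devlink dev eswitch show pci/0000:03:00.0
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *
 * Entering switchdev mode creates a VF representor netdev for each VF.
 */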
3185static const struct devlink_ops liquidio_devlink_ops = {
3186        .eswitch_mode_get = liquidio_eswitch_mode_get,
3187        .eswitch_mode_set = liquidio_eswitch_mode_set,
3188};
3189
3190static int
3191liquidio_get_port_parent_id(struct net_device *dev,
3192                            struct netdev_phys_item_id *ppid)
3193{
3194        struct lio *lio = GET_LIO(dev);
3195        struct octeon_device *oct = lio->oct_dev;
3196
3197        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3198                return -EOPNOTSUPP;
3199
3200        ppid->id_len = ETH_ALEN;
3201        ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3202
3203        return 0;
3204}
3205
3206static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3207                                 struct ifla_vf_stats *vf_stats)
3208{
3209        struct lio *lio = GET_LIO(netdev);
3210        struct octeon_device *oct = lio->oct_dev;
3211        struct oct_vf_stats stats;
3212        int ret;
3213
3214        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3215                return -EINVAL;
3216
3217        memset(&stats, 0, sizeof(struct oct_vf_stats));
3218        ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3219        if (!ret) {
3220                vf_stats->rx_packets = stats.rx_packets;
3221                vf_stats->tx_packets = stats.tx_packets;
3222                vf_stats->rx_bytes = stats.rx_bytes;
3223                vf_stats->tx_bytes = stats.tx_bytes;
3224                vf_stats->broadcast = stats.broadcast;
3225                vf_stats->multicast = stats.multicast;
3226        }
3227
3228        return ret;
3229}
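
/* These counters feed the kernel's IFLA_VF_STATS attributes; user space can
 * read them with, e.g., "ip -s link show <pf-netdev>".
 */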
3230
3231static const struct net_device_ops lionetdevops = {
3232        .ndo_open               = liquidio_open,
3233        .ndo_stop               = liquidio_stop,
3234        .ndo_start_xmit         = liquidio_xmit,
3235        .ndo_get_stats64        = liquidio_get_stats64,
3236        .ndo_set_mac_address    = liquidio_set_mac,
3237        .ndo_set_rx_mode        = liquidio_set_mcast_list,
3238        .ndo_tx_timeout         = liquidio_tx_timeout,
3239
3240        .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3241        .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3242        .ndo_change_mtu         = liquidio_change_mtu,
3243        .ndo_do_ioctl           = liquidio_ioctl,
3244        .ndo_fix_features       = liquidio_fix_features,
3245        .ndo_set_features       = liquidio_set_features,
3246        .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3247        .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3248        .ndo_set_vf_mac         = liquidio_set_vf_mac,
3249        .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3250        .ndo_get_vf_config      = liquidio_get_vf_config,
3251        .ndo_set_vf_spoofchk    = liquidio_set_vf_spoofchk,
3252        .ndo_set_vf_trust       = liquidio_set_vf_trust,
3253        .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3254        .ndo_get_vf_stats       = liquidio_get_vf_stats,
3255        .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3256};
3257
/**
 * \brief Entry point for the liquidio module
 */
3260static int __init liquidio_init(void)
3261{
3262        int i;
3263        struct handshake *hs;
3264
3265        init_completion(&first_stage);
3266
3267        octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3268
3269        if (liquidio_init_pci())
3270                return -EINVAL;
3271
3272        wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3273
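        /* Two-phase handshake: first wait for each probed device to complete
         * basic init (loop below), then wait for its firmware to report that
         * it has started (second loop).
         */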
3274        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3275                hs = &handshake[i];
3276                if (hs->pci_dev) {
3277                        wait_for_completion(&hs->init);
3278                        if (!hs->init_ok) {
3279                                /* init handshake failed */
3280                                dev_err(&hs->pci_dev->dev,
3281                                        "Failed to init device\n");
3282                                liquidio_deinit_pci();
3283                                return -EIO;
3284                        }
3285                }
3286        }
3287
3288        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3289                hs = &handshake[i];
3290                if (hs->pci_dev) {
3291                        wait_for_completion_timeout(&hs->started,
3292                                                    msecs_to_jiffies(30000));
3293                        if (!hs->started_ok) {
3294                                /* starter handshake failed */
3295                                dev_err(&hs->pci_dev->dev,
3296                                        "Firmware failed to start\n");
3297                                liquidio_deinit_pci();
3298                                return -EIO;
3299                        }
3300                }
3301        }
3302
3303        return 0;
3304}
3305
3306static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3307{
3308        struct octeon_device *oct = (struct octeon_device *)buf;
3309        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3310        int gmxport = 0;
3311        union oct_link_status *ls;
3312        int i;
3313
3314        if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3315                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
3316                        recv_pkt->buffer_size[0],
3317                        recv_pkt->rh.r_nic_info.gmxport);
3318                goto nic_info_err;
3319        }
3320
3321        gmxport = recv_pkt->rh.r_nic_info.gmxport;
3322        ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3323                OCT_DROQ_INFO_SIZE);
3324
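        /* The link-status words arrive in big-endian order; swap them to
         * host order (a no-op on big-endian kernels).
         */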
3325        octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3326        for (i = 0; i < oct->ifcount; i++) {
3327                if (oct->props[i].gmxport == gmxport) {
3328                        update_link_status(oct->props[i].netdev, ls);
3329                        break;
3330                }
3331        }
3332
3333nic_info_err:
3334        for (i = 0; i < recv_pkt->buffer_count; i++)
3335                recv_buffer_free(recv_pkt->buffer_ptr[i]);
3336        octeon_free_recv_info(recv_info);
3337        return 0;
3338}
3339
3340/**
3341 * \brief Setup network interfaces
3342 * @param octeon_dev  octeon device
3343 *
3344 * Called during init time for each device. It assumes the NIC
3345 * is already up and running.  The link information for each
3346 * interface is passed in link_info.
3347 */
3348static int setup_nic_devices(struct octeon_device *octeon_dev)
3349{
3350        struct lio *lio = NULL;
3351        struct net_device *netdev;
3352        u8 mac[6], i, j, *fw_ver, *micro_ver;
3353        unsigned long micro;
3354        u32 cur_ver;
3355        struct octeon_soft_command *sc;
3356        struct liquidio_if_cfg_resp *resp;
3357        struct octdev_props *props;
3358        int retval, num_iqueues, num_oqueues;
3359        int max_num_queues = 0;
3360        union oct_nic_if_cfg if_cfg;
3361        unsigned int base_queue;
3362        unsigned int gmx_port_id;
3363        u32 resp_size, data_size;
3364        u32 ifidx_or_pfnum;
3365        struct lio_version *vdata;
3366        struct devlink *devlink;
3367        struct lio_devlink_priv *lio_devlink;
3368
3369        /* This is to handle link status changes */
3370        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3371                                    OPCODE_NIC_INFO,
3372                                    lio_nic_info, octeon_dev);
3373
3374        /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3375         * They are handled directly.
3376         */
3377        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3378                                        free_netbuf);
3379
3380        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3381                                        free_netsgbuf);
3382
3383        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3384                                        free_netsgbuf_with_resp);
3385
3386        for (i = 0; i < octeon_dev->ifcount; i++) {
3387                resp_size = sizeof(struct liquidio_if_cfg_resp);
3388                data_size = sizeof(struct lio_version);
                sc = (struct octeon_soft_command *)
                        octeon_alloc_soft_command(octeon_dev, data_size,
                                                  resp_size, 0);
                if (!sc) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "Soft command allocation failed\n");
                        goto setup_nic_dev_done;
                }
                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3393                vdata = (struct lio_version *)sc->virtdptr;
3394
3395                *((u64 *)vdata) = 0;
3396                vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3397                vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3398                vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3399
3400                if (OCTEON_CN23XX_PF(octeon_dev)) {
3401                        num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3402                        num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3403                        base_queue = octeon_dev->sriov_info.pf_srn;
3404
3405                        gmx_port_id = octeon_dev->pf_num;
3406                        ifidx_or_pfnum = octeon_dev->pf_num;
3407                } else {
3408                        num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3409                                                octeon_get_conf(octeon_dev), i);
3410                        num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3411                                                octeon_get_conf(octeon_dev), i);
3412                        base_queue = CFG_GET_BASE_QUE_NIC_IF(
3413                                                octeon_get_conf(octeon_dev), i);
3414                        gmx_port_id = CFG_GET_GMXID_NIC_IF(
3415                                                octeon_get_conf(octeon_dev), i);
3416                        ifidx_or_pfnum = i;
3417                }
3418
3419                dev_dbg(&octeon_dev->pci_dev->dev,
3420                        "requesting config for interface %d, iqs %d, oqs %d\n",
3421                        ifidx_or_pfnum, num_iqueues, num_oqueues);
3422
3423                if_cfg.u64 = 0;
3424                if_cfg.s.num_iqueues = num_iqueues;
3425                if_cfg.s.num_oqueues = num_oqueues;
3426                if_cfg.s.base_queue = base_queue;
3427                if_cfg.s.gmx_port_id = gmx_port_id;
3428
3429                sc->iq_no = 0;
3430
3431                octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3432                                            OPCODE_NIC_IF_CFG, 0,
3433                                            if_cfg.u64, 0);
3434
3435                init_completion(&sc->complete);
3436                sc->sc_status = OCTEON_REQUEST_PENDING;
3437
3438                retval = octeon_send_soft_command(octeon_dev, sc);
3439                if (retval == IQ_SEND_FAILED) {
3440                        dev_err(&octeon_dev->pci_dev->dev,
3441                                "iq/oq config failed status: %x\n",
3442                                retval);
3443                        /* Soft instr is freed by driver in case of failure. */
3444                        octeon_free_soft_command(octeon_dev, sc);
3445                        return -EIO;
3446                }
3447
3448                /* Block until the response arrives or the request times
3449                 * out.
3450                 */
3451                retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3452                if (retval)
3453                        return retval;
3454
3455                retval = resp->status;
3456                if (retval) {
3457                        dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3458                        WRITE_ONCE(sc->caller_is_done, true);
3459                        goto setup_nic_dev_done;
3460                }
3461                snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3462                         32, "%s",
3463                         resp->cfg_info.liquidio_firmware_version);
3464
3465                /* Verify f/w version (in case of 'auto' loading from flash) */
3466                fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3467                if (memcmp(LIQUIDIO_BASE_VERSION,
3468                           fw_ver,
3469                           strlen(LIQUIDIO_BASE_VERSION))) {
3470                        dev_err(&octeon_dev->pci_dev->dev,
3471                                "Unmatched firmware version. Expected %s.x, got %s.\n",
3472                                LIQUIDIO_BASE_VERSION, fw_ver);
3473                        WRITE_ONCE(sc->caller_is_done, true);
3474                        goto setup_nic_dev_done;
3475                } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3476                           FW_IS_PRELOADED) {
3477                        dev_info(&octeon_dev->pci_dev->dev,
3478                                 "Using auto-loaded firmware version %s.\n",
3479                                 fw_ver);
3480                }
3481
3482                /* extract micro version field; point past '<maj>.<min>.' */
3483                micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3484                if (kstrtoul(micro_ver, 10, &micro) != 0)
3485                        micro = 0;
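                /* Example (illustrative, assuming LIQUIDIO_BASE_VERSION is
                 * "1.7"): for fw_ver "1.7.2", micro_ver points at "2" and
                 * fw_info.ver becomes 1.7.2.
                 */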
3486                octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3487                octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3488                octeon_dev->fw_info.ver.rev = micro;
3489
3490                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3491                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3492
3493                num_iqueues = hweight64(resp->cfg_info.iqmask);
3494                num_oqueues = hweight64(resp->cfg_info.oqmask);
3495
3496                if (!num_iqueues || !num_oqueues) {
3497                        dev_err(&octeon_dev->pci_dev->dev,
3498                                "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3499                                resp->cfg_info.iqmask,
3500                                resp->cfg_info.oqmask);
3501                        WRITE_ONCE(sc->caller_is_done, true);
3502                        goto setup_nic_dev_done;
3503                }
3504
3505                if (OCTEON_CN6XXX(octeon_dev)) {
3506                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3507                                                                    cn6xxx));
3508                } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3509                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3510                                                                    cn23xx_pf));
3511                }
3512
3513                dev_dbg(&octeon_dev->pci_dev->dev,
3514                        "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3515                        i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3516                        num_iqueues, num_oqueues, max_num_queues);
3517                netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3518
3519                if (!netdev) {
3520                        dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3521                        WRITE_ONCE(sc->caller_is_done, true);
3522                        goto setup_nic_dev_done;
3523                }
3524
3525                SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3526
3527                /* Associate the routines that will handle different
3528                 * netdev tasks.
3529                 */
3530                netdev->netdev_ops = &lionetdevops;
3531
3532                retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3533                if (retval) {
3534                        dev_err(&octeon_dev->pci_dev->dev,
3535                                "setting real number rx failed\n");
3536                        WRITE_ONCE(sc->caller_is_done, true);
3537                        goto setup_nic_dev_free;
3538                }
3539
3540                retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3541                if (retval) {
3542                        dev_err(&octeon_dev->pci_dev->dev,
3543                                "setting real number tx failed\n");
3544                        WRITE_ONCE(sc->caller_is_done, true);
3545                        goto setup_nic_dev_free;
3546                }
3547
3548                lio = GET_LIO(netdev);
3549
3550                memset(lio, 0, sizeof(struct lio));
3551
3552                lio->ifidx = ifidx_or_pfnum;
3553
3554                props = &octeon_dev->props[i];
3555                props->gmxport = resp->cfg_info.linfo.gmxport;
3556                props->netdev = netdev;
3557
3558                lio->linfo.num_rxpciq = num_oqueues;
3559                lio->linfo.num_txpciq = num_iqueues;
3560                for (j = 0; j < num_oqueues; j++) {
3561                        lio->linfo.rxpciq[j].u64 =
3562                                resp->cfg_info.linfo.rxpciq[j].u64;
3563                }
3564                for (j = 0; j < num_iqueues; j++) {
3565                        lio->linfo.txpciq[j].u64 =
3566                                resp->cfg_info.linfo.txpciq[j].u64;
3567                }
3568                lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3569                lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3570                lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3571
3572                WRITE_ONCE(sc->caller_is_done, true);
3573
3574                lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3575
3576                if (OCTEON_CN23XX_PF(octeon_dev) ||
3577                    OCTEON_CN6XXX(octeon_dev)) {
3578                        lio->dev_capability = NETIF_F_HIGHDMA
3579                                              | NETIF_F_IP_CSUM
3580                                              | NETIF_F_IPV6_CSUM
3581                                              | NETIF_F_SG | NETIF_F_RXCSUM
3582                                              | NETIF_F_GRO
3583                                              | NETIF_F_TSO | NETIF_F_TSO6
3584                                              | NETIF_F_LRO;
3585                }
3586                netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3587
3588                /*  Copy of transmit encapsulation capabilities:
3589                 *  TSO, TSO6, Checksums for this device
3590                 */
3591                lio->enc_dev_capability = NETIF_F_IP_CSUM
3592                                          | NETIF_F_IPV6_CSUM
3593                                          | NETIF_F_GSO_UDP_TUNNEL
3594                                          | NETIF_F_HW_CSUM | NETIF_F_SG
3595                                          | NETIF_F_RXCSUM
3596                                          | NETIF_F_TSO | NETIF_F_TSO6
3597                                          | NETIF_F_LRO;
3598
3599                netdev->hw_enc_features = (lio->enc_dev_capability &
3600                                           ~NETIF_F_LRO);
3601
3602                lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3603
3604                netdev->vlan_features = lio->dev_capability;
3605                /* Add any unchangeable hw features */
3606                lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3607                                        NETIF_F_HW_VLAN_CTAG_RX |
3608                                        NETIF_F_HW_VLAN_CTAG_TX;
3609
3610                netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3611
3612                netdev->hw_features = lio->dev_capability;
3613                /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3614                netdev->hw_features = netdev->hw_features &
3615                        ~NETIF_F_HW_VLAN_CTAG_RX;
3616
3617                /* MTU range: 68 - 16000 */
3618                netdev->min_mtu = LIO_MIN_MTU_SIZE;
3619                netdev->max_mtu = LIO_MAX_MTU_SIZE;
3620
3621                /* Point to the properties of the octeon device to which this
3622                 * interface belongs.
3623                 */
3624                lio->oct_dev = octeon_dev;
3625                lio->octprops = props;
3626                lio->netdev = netdev;
3627
3628                dev_dbg(&octeon_dev->pci_dev->dev,
3629                        "if%d gmx: %d hw_addr: 0x%llx\n", i,
3630                        lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3631
3632                for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3633                        u8 vfmac[ETH_ALEN];
3634
3635                        eth_random_addr(vfmac);
3636                        if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3637                                dev_err(&octeon_dev->pci_dev->dev,
3638                                        "Error setting VF%d MAC address\n",
3639                                        j);
3640                                goto setup_nic_dev_free;
3641                        }
3642                }
3643
3644                /* 64-bit swap required on LE machines */
3645                octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3646                for (j = 0; j < 6; j++)
3647                        mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
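                /* hw_addr now holds the six MAC bytes at byte offsets 2..7;
                 * offsets 0..1 are padding.
                 */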
3648
3649                /* Copy MAC Address to OS network device structure */
3650
3651                ether_addr_copy(netdev->dev_addr, mac);
3652
3653                /* By default all interfaces on a single Octeon use the same
3654                 * tx and rx queues
3655                 */
3656                lio->txq = lio->linfo.txpciq[0].s.q_no;
3657                lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3658                if (liquidio_setup_io_queues(octeon_dev, i,
3659                                             lio->linfo.num_txpciq,
3660                                             lio->linfo.num_rxpciq)) {
3661                        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3662                        goto setup_nic_dev_free;
3663                }
3664
3665                ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3666
3667                lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3668                lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3669
3670                if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3671                        dev_err(&octeon_dev->pci_dev->dev,
3672                                "Gather list allocation failed\n");
3673                        goto setup_nic_dev_free;
3674                }
3675
3676                /* Register ethtool support */
3677                liquidio_set_ethtool_ops(netdev);
3678                if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3679                        octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3680                else
3681                        octeon_dev->priv_flags = 0x0;
3682
3683                if (netdev->features & NETIF_F_LRO)
3684                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3685                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3686
3687                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3688                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
3689
3690                if ((debug != -1) && (debug & NETIF_MSG_HW))
3691                        liquidio_set_feature(netdev,
3692                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
3693
3694                if (setup_link_status_change_wq(netdev))
3695                        goto setup_nic_dev_free;
3696
3697                if ((octeon_dev->fw_info.app_cap_flags &
3698                     LIQUIDIO_TIME_SYNC_CAP) &&
3699                    setup_sync_octeon_time_wq(netdev))
3700                        goto setup_nic_dev_free;
3701
3702                if (setup_rx_oom_poll_fn(netdev))
3703                        goto setup_nic_dev_free;
3704
3705                /* Register the network device with the OS */
3706                if (register_netdev(netdev)) {
3707                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3708                        goto setup_nic_dev_free;
3709                }
3710
3711                dev_dbg(&octeon_dev->pci_dev->dev,
3712                        "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3713                        i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3714                netif_carrier_off(netdev);
3715                lio->link_changes++;
3716
3717                ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3718
3719                /* Sending command to firmware to enable Rx checksum offload
3720                 * by default at the time of setup of Liquidio driver for
3721                 * this device
3722                 */
3723                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3724                                            OCTNET_CMD_RXCSUM_ENABLE);
3725                liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3726                                     OCTNET_CMD_TXCSUM_ENABLE);
3727
3728                dev_dbg(&octeon_dev->pci_dev->dev,
3729                        "NIC ifidx:%d Setup successful\n", i);
3730
3731                if (octeon_dev->subsystem_id ==
3732                        OCTEON_CN2350_25GB_SUBSYS_ID ||
3733                    octeon_dev->subsystem_id ==
3734                        OCTEON_CN2360_25GB_SUBSYS_ID) {
3735                        cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3736                                             octeon_dev->fw_info.ver.min,
3737                                             octeon_dev->fw_info.ver.rev);
3738
3739                        /* speed control unsupported in f/w older than 1.7.2 */
3740                        if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3741                                dev_info(&octeon_dev->pci_dev->dev,
3742                                         "speed setting not supported by f/w.\n");
3743                                octeon_dev->speed_setting = 25;
3744                                octeon_dev->no_speed_setting = 1;
3745                        } else {
3746                                liquidio_get_speed(lio);
3747                        }
3748
3749                        if (octeon_dev->speed_setting == 0) {
3750                                octeon_dev->speed_setting = 25;
3751                                octeon_dev->no_speed_setting = 1;
3752                        }
3753                } else {
3754                        octeon_dev->no_speed_setting = 1;
3755                        octeon_dev->speed_setting = 10;
3756                }
3757                octeon_dev->speed_boot = octeon_dev->speed_setting;
3758
3759                /* don't read FEC setting if unsupported by f/w (see above) */
3760                if (octeon_dev->speed_boot == 25 &&
3761                    !octeon_dev->no_speed_setting) {
3762                        liquidio_get_fec(lio);
3763                        octeon_dev->props[lio->ifidx].fec_boot =
3764                                octeon_dev->props[lio->ifidx].fec;
3765                }
3766        }
3767
3768        devlink = devlink_alloc(&liquidio_devlink_ops,
3769                                sizeof(struct lio_devlink_priv));
3770        if (!devlink) {
3771                dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3772                goto setup_nic_dev_free;
3773        }
3774
3775        lio_devlink = devlink_priv(devlink);
3776        lio_devlink->oct = octeon_dev;
3777
3778        if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3779                devlink_free(devlink);
3780                dev_err(&octeon_dev->pci_dev->dev,
3781                        "devlink registration failed\n");
3782                goto setup_nic_dev_free;
3783        }
3784
3785        octeon_dev->devlink = devlink;
3786        octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3787
3788        return 0;
3789
3790setup_nic_dev_free:
3791
3792        while (i--) {
3793                dev_err(&octeon_dev->pci_dev->dev,
3794                        "NIC ifidx:%d Setup failed\n", i);
3795                liquidio_destroy_nic_device(octeon_dev, i);
3796        }
3797
3798setup_nic_dev_done:
3799
3800        return -ENODEV;
3801}
3802
3803#ifdef CONFIG_PCI_IOV
3804static int octeon_enable_sriov(struct octeon_device *oct)
3805{
3806        unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3807        struct pci_dev *vfdev;
3808        int err;
3809        u32 u;
3810
3811        if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3812                err = pci_enable_sriov(oct->pci_dev,
3813                                       oct->sriov_info.num_vfs_alloced);
3814                if (err) {
3815                        dev_err(&oct->pci_dev->dev,
3816                                "OCTEON: Failed to enable PCI sriov: %d\n",
3817                                err);
3818                        oct->sriov_info.num_vfs_alloced = 0;
3819                        return err;
3820                }
3821                oct->sriov_info.sriov_enabled = 1;
3822
3823                /* init lookup table that maps DPI ring number to VF pci_dev
3824                 * struct pointer
3825                 */
3826                u = 0;
3827                vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3828                                       OCTEON_CN23XX_VF_VID, NULL);
3829                while (vfdev) {
3830                        if (vfdev->is_virtfn &&
3831                            (vfdev->physfn == oct->pci_dev)) {
3832                                oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3833                                        vfdev;
3834                                u += oct->sriov_info.rings_per_vf;
3835                        }
3836                        vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3837                                               OCTEON_CN23XX_VF_VID, vfdev);
3838                }
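                /* Example (illustrative): with rings_per_vf == 8 the table
                 * now maps DPI ring 0 to VF0's pci_dev, ring 8 to VF1's,
                 * ring 16 to VF2's, and so on.
                 */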
3839        }
3840
3841        return num_vfs_alloced;
3842}
3843
3844static int lio_pci_sriov_disable(struct octeon_device *oct)
3845{
3846        int u;
3847
3848        if (pci_vfs_assigned(oct->pci_dev)) {
3849                dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3850                return -EPERM;
3851        }
3852
3853        pci_disable_sriov(oct->pci_dev);
3854
3855        u = 0;
3856        while (u < MAX_POSSIBLE_VFS) {
3857                oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3858                u += oct->sriov_info.rings_per_vf;
3859        }
3860
3861        oct->sriov_info.num_vfs_alloced = 0;
3862        dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3863                 oct->pf_num);
3864
3865        return 0;
3866}
3867
3868static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3869{
3870        struct octeon_device *oct = pci_get_drvdata(dev);
3871        int ret = 0;
3872
3873        if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3874            (oct->sriov_info.sriov_enabled)) {
3875                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3876                         oct->pf_num, num_vfs);
3877                return 0;
3878        }
3879
3880        if (!num_vfs) {
3881                lio_vf_rep_destroy(oct);
3882                ret = lio_pci_sriov_disable(oct);
3883        } else if (num_vfs > oct->sriov_info.max_vfs) {
3884                dev_err(&oct->pci_dev->dev,
3885                        "OCTEON: Max allowed VFs:%d user requested:%d\n",
3886                        oct->sriov_info.max_vfs, num_vfs);
3887                ret = -EPERM;
3888        } else {
3889                oct->sriov_info.num_vfs_alloced = num_vfs;
                ret = octeon_enable_sriov(oct);
                if (ret < 0)
                        return ret;
                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
                         oct->pf_num, num_vfs);
                ret = lio_vf_rep_create(oct);
                if (ret)
                        dev_info(&oct->pci_dev->dev,
                                 "vf representor create failed\n");
3897        }
3898
3899        return ret;
3900}
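
/* Illustrative usage, assuming this function is wired up as the driver's
 * .sriov_configure callback: VFs are created and destroyed through the
 * standard sysfs knob, e.g.
 *
 *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 */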
3901#endif
3902
3903/**
3904 * \brief initialize the NIC
3905 * @param oct octeon device
3906 *
3907 * This initialization routine is called once the Octeon device application is
3908 * up and running
3909 */
3910static int liquidio_init_nic_module(struct octeon_device *oct)
3911{
3912        int i, retval = 0;
3913        int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3914
3915        dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3916
3917        /* Only the default iq and oq were initialized earlier; initialize
3918         * the rest as well, and run the port_config command for each
3919         * port.
3920         */
3921        oct->ifcount = num_nic_ports;
3922
3923        memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3924
3925        for (i = 0; i < MAX_OCTEON_LINKS; i++)
3926                oct->props[i].gmxport = -1;
3927
3928        retval = setup_nic_devices(oct);
3929        if (retval) {
3930                dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3931                goto octnet_init_failure;
3932        }
3933
3934        /* Call vf_rep_modinit if the firmware is switchdev capable
3935         * and do it from the first liquidio function probed.
3936         */
3937        if (!oct->octeon_id &&
3938            oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3939                retval = lio_vf_rep_modinit();
3940                if (retval) {
3941                        liquidio_stop_nic_module(oct);
3942                        goto octnet_init_failure;
3943                }
3944        }
3945
3946        liquidio_ptp_init(oct);
3947
3948        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3949
3950        return retval;
3951
3952octnet_init_failure:
3953
3954        oct->ifcount = 0;
3955
3956        return retval;
3957}
3958
3959/**
3960 * \brief starter callback that invokes the remaining initialization work after
3961 * the NIC is up and running.
3962 * @param work  work_struct (embedded in struct cavium_wk)
3963 */
3964static void nic_starter(struct work_struct *work)
3965{
3966        struct octeon_device *oct;
3967        struct cavium_wk *wk = (struct cavium_wk *)work;
3968
3969        oct = (struct octeon_device *)wk->ctxptr;
3970
3971        if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3972                return;
3973
3974        /* If the status of the device is CORE_OK, the core
3975         * application has reported its application type. Call
3976         * any registered handlers now and move to the RUNNING
3977         * state.
3978         */
3979        if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3980                schedule_delayed_work(&oct->nic_poll_work.work,
3981                                      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3982                return;
3983        }
3984
3985        atomic_set(&oct->status, OCT_DEV_RUNNING);
3986
3987        if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3988                dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3989
3990                if (liquidio_init_nic_module(oct))
3991                        dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3992                else
3993                        handshake[oct->octeon_id].started_ok = 1;
3994        } else {
3995                dev_err(&oct->pci_dev->dev,
3996                        "Unexpected application running on NIC (%d). Check firmware.\n",
3997                        oct->app_mode);
3998        }
3999
4000        complete(&handshake[oct->octeon_id].started);
4001}
4002
4003static int
4004octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4005{
4006        struct octeon_device *oct = (struct octeon_device *)buf;
4007        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4008        int i, notice, vf_idx;
4009        bool cores_crashed;
4010        u64 *data, vf_num;
4011
4012        notice = recv_pkt->rh.r.ossp;
4013        data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4014
4015        /* the first 64-bit word of data is the vf_num */
4016        vf_num = data[0];
4017        octeon_swap_8B_data(&vf_num, 1);
4018        vf_idx = (int)vf_num - 1;
4019
4020        cores_crashed = READ_ONCE(oct->cores_crashed);
4021
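        /* Hold a reference on this PF module while a VF driver is bound so
         * the PF cannot be unloaded under an active VF; refcounting is
         * skipped once the firmware cores have crashed, since matching
         * load/remove notices can no longer be guaranteed.
         */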
4022        if (notice == VF_DRV_LOADED) {
4023                if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4024                        oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4025                        dev_info(&oct->pci_dev->dev,
4026                                 "driver for VF%d was loaded\n", vf_idx);
4027                        if (!cores_crashed)
4028                                try_module_get(THIS_MODULE);
4029                }
4030        } else if (notice == VF_DRV_REMOVED) {
4031                if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4032                        oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4033                        dev_info(&oct->pci_dev->dev,
4034                                 "driver for VF%d was removed\n", vf_idx);
4035                        if (!cores_crashed)
4036                                module_put(THIS_MODULE);
4037                }
4038        } else if (notice == VF_DRV_MACADDR_CHANGED) {
4039                u8 *b = (u8 *)&data[1];
4040
4041                oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4042                dev_info(&oct->pci_dev->dev,
4043                         "VF driver changed VF%d's MAC address to %pM\n",
4044                         vf_idx, b + 2);
4045        }
4046
4047        for (i = 0; i < recv_pkt->buffer_count; i++)
4048                recv_buffer_free(recv_pkt->buffer_ptr[i]);
4049        octeon_free_recv_info(recv_info);
4050
4051        return 0;
4052}
4053
4054/**
4055 * \brief Device initialization for each Octeon device that is probed
4056 * @param octeon_dev  octeon device
4057 */
4058static int octeon_device_init(struct octeon_device *octeon_dev)
4059{
4060        int j, ret;
4061        char bootcmd[] = "\n";
4062        char *dbg_enb = NULL;
4063        enum lio_fw_state fw_state;
4064        struct octeon_device_priv *oct_priv =
4065                (struct octeon_device_priv *)octeon_dev->priv;
4066        atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4067
4068        /* Enable access to the octeon device and make its DMA capability
4069         * known to the OS.
4070         */
4071        if (octeon_pci_os_setup(octeon_dev))
4072                return 1;
4073
4074        atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4075
4076        /* Identify the Octeon type and map the BAR address space. */
4077        if (octeon_chip_specific_setup(octeon_dev)) {
4078                dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4079                return 1;
4080        }
4081
4082        atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4083
4084        /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4085         * since that is what is required for the reference to be removed
4086         * during de-initialization (see 'octeon_destroy_resources').
4087         */
4088        octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4089                               PCI_SLOT(octeon_dev->pci_dev->devfn),
4090                               PCI_FUNC(octeon_dev->pci_dev->devfn),
4091                               true);
4092
4093        octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4094
4095        /* CN23XX supports preloaded firmware if the following is true:
4096         *
4097         * The adapter indicates that firmware is currently running AND
4098         * 'fw_type' is 'auto'.
4099         *
4100         * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4101         */
4102        if (OCTEON_CN23XX_PF(octeon_dev) &&
4103            cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4104                atomic_cmpxchg(octeon_dev->adapter_fw_state,
4105                               FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4106        }
4107
4108        /* Only the adapter's first device needs to load the firmware. */
4109        fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4110                                  FW_NEEDS_TO_BE_LOADED,
4111                                  FW_IS_BEING_LOADED);
4112
4113        /* Here, [local variable] 'fw_state' is set to one of:
4114         *
4115         *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4116         *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4117         *                          firmware to the adapter.
4118         *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4119         *                          firmware to the adapter.
4120         */
4121
4122        /* Prior to f/w load, perform a soft reset of the Octeon device;
4123         * if error resetting, return w/error.
4124         */
4125        if (fw_state == FW_NEEDS_TO_BE_LOADED)
4126                if (octeon_dev->fn_list.soft_reset(octeon_dev))
4127                        return 1;
4128
4129        /* Initialize the dispatch mechanism used to push packets arriving on
4130         * Octeon Output queues.
4131         */
4132        if (octeon_init_dispatch_list(octeon_dev))
4133                return 1;
4134
4135        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4136                                    OPCODE_NIC_CORE_DRV_ACTIVE,
4137                                    octeon_core_drv_init,
4138                                    octeon_dev);
4139
4140        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4141                                    OPCODE_NIC_VF_DRV_NOTICE,
4142                                    octeon_recv_vf_drv_notice, octeon_dev);
4143        INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4144        octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4145        schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4146                              LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4147
4148        atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4149
4150        if (octeon_set_io_queues_off(octeon_dev)) {
4151                dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4152                return 1;
4153        }
4154
4155        if (OCTEON_CN23XX_PF(octeon_dev)) {
4156                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4157                if (ret) {
4158                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4159                        return ret;
4160                }
4161        }
4162
4163        /* Initialize soft command buffer pool
4164         */
4165        if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4166                dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4167                return 1;
4168        }
4169        atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4170
4171        /*  Setup the data structures that manage this Octeon's Input queues. */
4172        if (octeon_setup_instr_queues(octeon_dev)) {
4173                dev_err(&octeon_dev->pci_dev->dev,
4174                        "instruction queue initialization failed\n");
4175                return 1;
4176        }
4177        atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4178
4179        /* Initialize lists to manage the requests of different types that
4180         * arrive from user & kernel applications for this octeon device.
4181         */
4182        if (octeon_setup_response_list(octeon_dev)) {
4183                dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4184                return 1;
4185        }
4186        atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4187
4188        if (octeon_setup_output_queues(octeon_dev)) {
4189                dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4190                return 1;
4191        }
4192
4193        atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4194
4195        if (OCTEON_CN23XX_PF(octeon_dev)) {
4196                if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4197                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4198                        return 1;
4199                }
4200                atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4201
                if (octeon_allocate_ioq_vector(octeon_dev,
                                               octeon_dev->sriov_info.num_pf_rings)) {
4205                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4206                        return 1;
4207                }
4208                atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4209
4210        } else {
4211                /* The input and output queue registers were setup earlier (the
4212                 * queues were not enabled). Any additional registers
4213                 * that need to be programmed should be done now.
4214                 */
4215                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4216                if (ret) {
4217                        dev_err(&octeon_dev->pci_dev->dev,
4218                                "Failed to configure device registers\n");
4219                        return ret;
4220                }
4221        }
4222
4223        /* Initialize the tasklet that handles output queue packet processing. */
4224        dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4225        tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4226                     (unsigned long)octeon_dev);
4227
4228        /* Setup the interrupt handler and record the INT SUM register address
4229         */
4230        if (octeon_setup_interrupt(octeon_dev,
4231                                   octeon_dev->sriov_info.num_pf_rings))
4232                return 1;
4233
4234        /* Enable Octeon device interrupts */
4235        octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4236
4237        atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4238
4239        /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4240         * the output queue is enabled.
4241         * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4242         * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4243         * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4244         * before any credits have been issued, causing the ring to be reset
4245         * (and the f/w appear to never have started).
4246         */
4247        for (j = 0; j < octeon_dev->num_oqs; j++)
4248                writel(octeon_dev->droq[j]->max_count,
4249                       octeon_dev->droq[j]->pkts_credit_reg);
4250
4251        /* Enable the input and output queues for this Octeon device */
4252        ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4253        if (ret) {
4254                dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4255                return ret;
4256        }
4257
4258        atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4259
4260        if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4261                dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4262                if (!ddr_timeout) {
4263                        dev_info(&octeon_dev->pci_dev->dev,
4264                                 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4265                }
4266
4267                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4268
4269                /* Wait for the octeon to initialize DDR after the soft-reset. */
4270                while (!ddr_timeout) {
4271                        set_current_state(TASK_INTERRUPTIBLE);
4272                        if (schedule_timeout(HZ / 10)) {
4273                                /* user probably pressed Control-C */
4274                                return 1;
4275                        }
4276                }
4277                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4278                if (ret) {
4279                        dev_err(&octeon_dev->pci_dev->dev,
4280                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4281                                ret);
4282                        return 1;
4283                }
4284
4285                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4286                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4287                        return 1;
4288                }
4289
4290                /* Divert uboot to take commands from host instead. */
4291                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4292
4293                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4294                ret = octeon_init_consoles(octeon_dev);
4295                if (ret) {
4296                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4297                        return 1;
4298                }
4299                /* If console debug enabled, specify empty string to use default
4300                 * enablement ELSE specify NULL string for 'disabled'.
4301                 */
4302                dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4303                ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4304                if (ret) {
4305                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4306                        return 1;
4307                } else if (octeon_console_debug_enabled(0)) {
4308                        /* If console was added AND we're logging console output
4309                         * then set our console print function.
4310                         */
4311                        octeon_dev->console[0].print = octeon_dbg_console_print;
4312                }
4313
4314                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4315
4316                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4317                ret = load_firmware(octeon_dev);
4318                if (ret) {
4319                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4320                        return 1;
4321                }
4322
4323                atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4324        }
4325
4326        handshake[octeon_dev->octeon_id].init_ok = 1;
4327        complete(&handshake[octeon_dev->octeon_id].init);
4328
4329        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4330
4331        return 0;
4332}
4333
4334/**
4335 * \brief Debug console print function
4336 * @param octeon_dev  octeon device
4337 * @param console_num console number
4338 * @param prefix      first portion of line to display
4339 * @param suffix      second portion of line to display
4340 *
4341 * The OCTEON debug console outputs entire lines (excluding '\n').
4342 * Normally, the line will be passed in the 'prefix' parameter.
4343 * However, due to buffering, it is possible for a line to be split into two
4344 * parts, in which case they will be passed as the 'prefix' parameter and
4345 * 'suffix' parameter.
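 *
 * Example (illustrative): the line "ABCDEF" may arrive as prefix "ABC" and
 * suffix "DEF", and is printed as the single line "ABCDEF".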
4346 */
4347static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4348                                    char *prefix, char *suffix)
4349{
4350        if (prefix && suffix)
4351                dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4352                         suffix);
4353        else if (prefix)
4354                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4355        else if (suffix)
4356                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4357
4358        return 0;
4359}
4360
4361/**
4362 * \brief Exits the module
4363 */
4364static void __exit liquidio_exit(void)
4365{
4366        liquidio_deinit_pci();
4367
4368        pr_info("LiquidIO network module is now unloaded\n");
4369}
4370
4371module_init(liquidio_init);
4372module_exit(liquidio_exit);
4373