linux/drivers/net/ethernet/cavium/liquidio/lio_main.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses the firmware in flash if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");

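/* Usage sketch (illustrative, not part of the driver): the module
 * parameters above can be supplied at load time, e.g.
 *
 *   modprobe liquidio fw_type=nic ddr_timeout=5000 console_bitmask=0x1
 *
 * The values shown are examples only; the defaults are as declared above.
 */
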
/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns  1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}

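/* Worked example (illustrative): with console_bitmask=0x5, consoles 0 and 2
 * have debug output redirected to syslog:
 *   (0x5 >> 0) & 0x1 == 1, (0x5 >> 1) & 0x1 == 0, (0x5 >> 2) & 0x1 == 1
 */
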
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
/* Send localtime to octeon firmware every 60 seconds.
 * Keeping the firmware on the same time reference as the host makes it
 * easy to correlate firmware-logged events/errors with host events while
 * debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
};

struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

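/* Note on tx_info (added, descriptive): the __BIG_ENDIAN_BITFIELD ifdef
 * keeps gso_size in the most significant 16 bits of tx_info.u64 on both
 * byte orders, so the value handed to the firmware has a single layout.
 * E.g. on a little-endian host,
 *
 *   info.u64 = ((u64)gso_size << 48) | ((u64)gso_segs << 32);
 *
 * produces the same u64 as filling in the struct members.
 */
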
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

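/* Note (added, descriptive): PCI_ERR_UNCOR_STATUS is write-1-to-clear, so
 * writing the masked value back clears exactly the selected bits (the
 * non-fatal ones while the channel is still usable, the fatal ones
 * otherwise).
 */
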
/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return DISCONNECT. There is no support for recovery, only
         * for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a
         * Fatal Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a
         * Fatal Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int numqs = lio->netdev->real_num_tx_queues;
        int ret_val = 0;
        int q, iq;

        /* check each sub-queue state */
        for (q = 0; q < numqs; q++) {
                iq = lio->linfo.txpciq[q %
                        lio->oct_dev->num_iqs].s.q_no;
                if (octnet_iq_is_full(lio->oct_dev, iq))
                        continue;
                if (__netif_subqueue_stopped(lio->netdev, q)) {
                        netif_wake_subqueue(lio->netdev, q);
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                  tx_restart, 1);
                        ret_val++;
                }
        }

        return ret_val;
}

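/* Note (added, descriptive): the netdev may expose more tx sub-queues than
 * the device has input queues, so sub-queue q maps onto txpciq[q % num_iqs]
 * above and several sub-queues can share one IQ.
 */
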
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains the max MTU of the lio
         * interface. This work is queued only when the new max MTU of the
         * interface is less than the current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
                __func__, lio->linfo.link.u64, ls->u64);
        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }
                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
                                   current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->extended->max_mtu = lio->linfo.link.s.mtu;
                }
                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

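/* Note (added, an interpretation of the code above): update_link_status()
 * runs from response processing context, where taking rtnl_lock() is not
 * safe, so the actual dev_set_mtu() call is deferred to the link_status_wq
 * work item (octnet_link_status_change() above), which runs in process
 * context.
 */
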
/**
 * lio_sync_octeon_time_cb - callback that is invoked when soft command
 * sent by lio_sync_octeon_time() has completed successfully or failed
 *
 * @oct - octeon device structure
 * @status - indicates success or failure
 * @buf - pointer to the command that was sent to firmware
 **/
static void lio_sync_octeon_time_cb(struct octeon_device *oct,
                                    u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

        if (status)
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon; error=%d\n", status);

        octeon_free_soft_command(oct, sc);
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct timespec64 ts;
        struct lio_time *lt;
        int ret;

        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: soft command allocation failed\n");
                return;
        }

        lt = (struct lio_time *)sc->virtdptr;

        /* Get time of the day */
        getnstimeofday64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

        sc->callback = lio_sync_octeon_time_cb;
        sc->callback_arg = sc;
        sc->wait_time = 1000;

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
        }

        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

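/* Note (added, descriptive): lio_sync_octeon_time() re-queues itself every
 * LIO_SYNC_OCTEON_TIME_INTERVAL_MS, so the single queue_delayed_work() call
 * in setup_sync_octeon_time_wq() below keeps the time sync running until
 * cleanup_sync_octeon_time_wq() cancels it.
 */
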
/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->sync_octeon_time_wq.wq =
                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
        if (!lio->sync_octeon_time_wq.wq) {
                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
                          lio_sync_octeon_time);
        lio->sync_octeon_time_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

        return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

        if (time_wq->wq) {
                cancel_delayed_work_sync(&time_wq->wk.work);
                destroy_workqueue(time_wq->wq);
        }
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

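/* Example (illustrative): a two-PF adapter appears as two functions at the
 * same bus/slot, e.g. 03:00.0 and 03:00.1; the helper above pairs them by
 * matching the bus and slot numbers of consecutively numbered octeon
 * devices.
 */
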
static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                                                (mask_of_crashed_or_stuck_cores
                                                 >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                /* other_oct may be NULL if no paired PF was found */
                vfs_mask2 = other_oct ?
                        READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

                vfs_referencing_pf  = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

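/* Note (added, an interpretation of the code above): a loaded VF driver
 * holds a reference on this PF module (tracked in vf_drv_loaded_mask);
 * once the cores have crashed, the watchdog drops those references so the
 * module can still be unloaded even though the VF drivers cannot release
 * it themselves.
 */
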
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if (((pdev->device == OCTEON_CN66XX) ||
             (pdev->device == OCTEON_CN68XX)))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u8 bus, device, function;

                if (atomic_read(oct_dev->adapter_refcount) == 1) {
                        /* Each NIC gets one watchdog kernel thread.  The first
                         * PF (of each NIC) that gets pci_driver->probe()'d
                         * creates that thread.
                         */
                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

static bool fw_type_is_auto(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

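/* Note (added, descriptive): fw_type defaults to LIO_FW_NAME_TYPE_AUTO, so
 * fw_type_is_auto() returns true unless a different fw_type string was
 * passed at module load time.
 */
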
/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

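/* Note (added, descriptive): octeon_pci_flr() saves config space, masks
 * INTx and locks config accesses before __pci_reset_function_locked()
 * issues the function level reset, then restores the saved config space.
 */
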
/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:

                /* fallthrough */
        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

        /* fallthrough */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

        /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

        /* fallthrough */
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

        /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
                            u32 status,
                            void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_rx_ctl_context *ctx;

        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (status)
                dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
        WRITE_ONCE(ctx->cond, 1);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        struct liquidio_rx_ctl_context *ctx;
        union octnet_cmd *ncmd;
        int ctx_size = sizeof(struct liquidio_rx_ctl_context);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, ctx_size);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate octeon_soft_command\n");
                return;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;
        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        WRITE_ONCE(ctx->cond, 0);
        ctx->octeon_id = lio_get_device_id(oct);
        init_waitqueue_head(&ctx->wc);

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        sc->callback = rx_ctl_callback;
        sc->callback_arg = sc;
        sc->wait_time = 5000;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
                        return;
                oct->props[lio->ifidx].rx_on = start_stop;
        }

        octeon_free_soft_command(oct, sc);
}

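/* Note (added, descriptive): send_rx_ctrl_cmd() shows the soft-command
 * completion pattern used throughout this file: the callback records
 * completion in the context and wakes the wait queue, while the sender
 * sleeps in sleep_cond() until the response arrives or times out.
 */
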
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct lio *lio;
        struct napi_struct *napi, *n;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_sync_octeon_time_wq(netdev);
        cleanup_link_status_change_wq(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        lio_vf_rep_destroy(oct);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        if (oct->devlink) {
                devlink_unregister(oct->devlink);
                devlink_free(oct->devlink);
                oct->devlink = NULL;
        }

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (!oct_dev->octeon_id &&
            oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
                lio_vf_rep_modexit();

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
        u32 dev_id, rev_id;
        int ret = 1;
        char *s;

        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
        oct->rev_id = rev_id & 0xff;

        switch (dev_id) {
        case OCTEON_CN68XX_PCIID:
                oct->chip_id = OCTEON_CN68XX;
                ret = lio_setup_cn68xx_octeon_device(oct);
                s = "CN68XX";
                break;

        case OCTEON_CN66XX_PCIID:
                oct->chip_id = OCTEON_CN66XX;
                ret = lio_setup_cn66xx_octeon_device(oct);
                s = "CN66XX";
                break;

        case OCTEON_CN23XX_PCIID_PF:
                oct->chip_id = OCTEON_CN23XX_PF_VID;
                ret = setup_cn23xx_octeon_pf_device(oct);
                if (ret)
                        break;
#ifdef CONFIG_PCI_IOV
                pci_sriov_set_totalvfs(oct->pci_dev,
                                       oct->sriov_info.max_vfs);
#endif
                s = "CN23XX";
                break;

        default:
                s = "?";
                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
                        dev_id);
        }

        if (!ret)
                dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
                         OCTEON_MAJOR_REV(oct),
                         OCTEON_MINOR_REV(oct),
                         octeon_get_conf(oct)->card_name,
                         LIQUIDIO_VERSION);

        return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
        /* setup PCI stuff first */
        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

1475/**
1476 * \brief Unmap and free network buffer
1477 * @param buf buffer
1478 */
1479static void free_netbuf(void *buf)
1480{
1481        struct sk_buff *skb;
1482        struct octnet_buf_free_info *finfo;
1483        struct lio *lio;
1484
1485        finfo = (struct octnet_buf_free_info *)buf;
1486        skb = finfo->skb;
1487        lio = finfo->lio;
1488
1489        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1490                         DMA_TO_DEVICE);
1491
1492        tx_buffer_free(skb);
1493}
1494
1495/**
1496 * \brief Unmap and free gather buffer
1497 * @param buf buffer
1498 */
1499static void free_netsgbuf(void *buf)
1500{
1501        struct octnet_buf_free_info *finfo;
1502        struct sk_buff *skb;
1503        struct lio *lio;
1504        struct octnic_gather *g;
1505        int i, frags, iq;
1506
1507        finfo = (struct octnet_buf_free_info *)buf;
1508        skb = finfo->skb;
1509        lio = finfo->lio;
1510        g = finfo->g;
1511        frags = skb_shinfo(skb)->nr_frags;
1512
1513        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1514                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1515                         DMA_TO_DEVICE);
1516
1517        i = 1;
1518        while (frags--) {
1519                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1520
1521                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1522                               g->sg[(i >> 2)].ptr[(i & 3)],
1523                               frag->size, DMA_TO_DEVICE);
1524                i++;
1525        }
1526
1527        iq = skb_iq(lio->oct_dev, skb);
1528        spin_lock(&lio->glist_lock[iq]);
1529        list_add_tail(&g->list, &lio->glist[iq]);
1530        spin_unlock(&lio->glist_lock[iq]);
1531
1532        tx_buffer_free(skb);
1533}
1534
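/* The gather list walked above packs four DMA pointers per scatter
 * entry, so overall buffer i lives at g->sg[i >> 2].ptr[i & 3]: slot 0
 * of entry 0 holds the linear skb data, and fragment f (0-based) sits at
 * overall index i = f + 1. A worked example of the indexing, assuming
 * that layout:
 *
 *	i = 5  ->  g->sg[1].ptr[1]   (5 >> 2 == 1, 5 & 3 == 1)
 *	i = 9  ->  g->sg[2].ptr[1]   (9 >> 2 == 2, 9 & 3 == 1)
 */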
1535/**
1536 * \brief Unmap and free gather buffer with response
1537 * @param buf buffer
1538 */
1539static void free_netsgbuf_with_resp(void *buf)
1540{
1541        struct octeon_soft_command *sc;
1542        struct octnet_buf_free_info *finfo;
1543        struct sk_buff *skb;
1544        struct lio *lio;
1545        struct octnic_gather *g;
1546        int i, frags, iq;
1547
1548        sc = (struct octeon_soft_command *)buf;
1549        skb = (struct sk_buff *)sc->callback_arg;
1550        finfo = (struct octnet_buf_free_info *)&skb->cb;
1551
1552        lio = finfo->lio;
1553        g = finfo->g;
1554        frags = skb_shinfo(skb)->nr_frags;
1555
1556        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1557                         g->sg[0].ptr[0], (skb->len - skb->data_len),
1558                         DMA_TO_DEVICE);
1559
1560        i = 1;
1561        while (frags--) {
1562                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1563
1564                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
1565                               g->sg[(i >> 2)].ptr[(i & 3)],
1566                               frag->size, DMA_TO_DEVICE);
1567                i++;
1568        }
1569
1570        iq = skb_iq(lio->oct_dev, skb);
1571
1572        spin_lock(&lio->glist_lock[iq]);
1573        list_add_tail(&g->list, &lio->glist[iq]);
1574        spin_unlock(&lio->glist_lock[iq]);
1575
1576        /* Don't free the skb yet */
1577}
1578
1579/**
1580 * \brief Adjust ptp frequency
1581 * @param ptp PTP clock info
1582 * @param ppb how much to adjust by, in parts-per-billion
1583 */
1584static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1585{
1586        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1587        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1588        u64 comp, delta;
1589        unsigned long flags;
1590        bool neg_adj = false;
1591
1592        if (ppb < 0) {
1593                neg_adj = true;
1594                ppb = -ppb;
1595        }
1596
1597        /* The hardware adds the clock compensation value to the
1598         * PTP clock on every coprocessor clock cycle, so we
1599         * compute the delta in terms of coprocessor clocks.
1600         */
1601        delta = (u64)ppb << 32;
1602        do_div(delta, oct->coproc_clock_rate);
1603
1604        spin_lock_irqsave(&lio->ptp_lock, flags);
1605        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1606        if (neg_adj)
1607                comp -= delta;
1608        else
1609                comp += delta;
1610        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1611        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1612
1613        return 0;
1614}
1615
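/* Worked example for the compensation math above (a sketch, assuming a
 * 600 MHz coprocessor clock): CN6XXX_MIO_PTP_CLOCK_COMP is effectively
 * nanoseconds-per-coprocessor-cycle in 32.32 fixed point (see
 * liquidio_ptp_init below). For ppb = +100:
 *
 *	delta = (100ull << 32) / 600000000  ~= 715
 *
 * i.e. about 715 units of 2^-32 ns per cycle; over 600 M cycles/s that
 * adds ~100 ns per second, exactly the requested 100 ppb speed-up.
 */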
1616/**
1617 * \brief Adjust ptp time
1618 * @param ptp PTP clock info
1619 * @param delta how much to adjust by, in nanosecs
1620 */
1621static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1622{
1623        unsigned long flags;
1624        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1625
1626        spin_lock_irqsave(&lio->ptp_lock, flags);
1627        lio->ptp_adjust += delta;
1628        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1629
1630        return 0;
1631}
1632
1633/**
1634 * \brief Get hardware clock time, including any adjustment
1635 * @param ptp PTP clock info
1636 * @param ts timespec
1637 */
1638static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1639                                struct timespec64 *ts)
1640{
1641        u64 ns;
1642        unsigned long flags;
1643        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1644        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1645
1646        spin_lock_irqsave(&lio->ptp_lock, flags);
1647        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1648        ns += lio->ptp_adjust;
1649        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1650
1651        *ts = ns_to_timespec64(ns);
1652
1653        return 0;
1654}
1655
1656/**
1657 * \brief Set hardware clock time. Reset adjustment
1658 * @param ptp PTP clock info
1659 * @param ts timespec
1660 */
1661static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1662                                const struct timespec64 *ts)
1663{
1664        u64 ns;
1665        unsigned long flags;
1666        struct lio *lio = container_of(ptp, struct lio, ptp_info);
1667        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1668
1669        ns = timespec64_to_ns(ts);
1670
1671        spin_lock_irqsave(&lio->ptp_lock, flags);
1672        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1673        lio->ptp_adjust = 0;
1674        spin_unlock_irqrestore(&lio->ptp_lock, flags);
1675
1676        return 0;
1677}
1678
1679/**
1680 * \brief Enable ancillary PTP features (unsupported; always returns -EOPNOTSUPP)
1681 * @param ptp PTP clock info
1682 * @param rq request
1683 * @param on enable or disable flag
1684 */
1685static int
1686liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1687                    struct ptp_clock_request *rq __attribute__((unused)),
1688                    int on __attribute__((unused)))
1689{
1690        return -EOPNOTSUPP;
1691}
1692
1693/**
1694 * \brief Open PTP clock source
1695 * @param netdev network device
1696 */
1697static void oct_ptp_open(struct net_device *netdev)
1698{
1699        struct lio *lio = GET_LIO(netdev);
1700        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1701
1702        spin_lock_init(&lio->ptp_lock);
1703
1704        snprintf(lio->ptp_info.name, sizeof(lio->ptp_info.name), "%s",
                 netdev->name);
1705        lio->ptp_info.owner = THIS_MODULE;
1706        lio->ptp_info.max_adj = 250000000;
1707        lio->ptp_info.n_alarm = 0;
1708        lio->ptp_info.n_ext_ts = 0;
1709        lio->ptp_info.n_per_out = 0;
1710        lio->ptp_info.pps = 0;
1711        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1712        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1713        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1714        lio->ptp_info.settime64 = liquidio_ptp_settime;
1715        lio->ptp_info.enable = liquidio_ptp_enable;
1716
1717        lio->ptp_adjust = 0;
1718
1719        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1720                                             &oct->pci_dev->dev);
1721
1722        if (IS_ERR(lio->ptp_clock))
1723                lio->ptp_clock = NULL;
1724}
1725
1726/**
1727 * \brief Init PTP clock
1728 * @param oct octeon device
1729 */
1730static void liquidio_ptp_init(struct octeon_device *oct)
1731{
1732        u64 clock_comp, cfg;
1733
1734        clock_comp = (u64)NSEC_PER_SEC << 32;
1735        do_div(clock_comp, oct->coproc_clock_rate);
1736        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1737
1738        /* Enable */
1739        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1740        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1741}
1742
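/* Sanity check for the nominal value written above (a sketch, assuming a
 * 1 GHz coprocessor clock): clock_comp = (NSEC_PER_SEC << 32) / rate
 * = (1e9 << 32) / 1e9 = 1 << 32, i.e. exactly 1.0 ns per cycle in 32.32
 * fixed point, so the PTP clock nominally advances in lockstep with
 * wall-clock nanoseconds until adjfreq nudges the compensation value.
 */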
1743/**
1744 * \brief Load firmware to device
1745 * @param oct octeon device
1746 *
1747 * Maps device to firmware filename, requests firmware, and downloads it
1748 */
1749static int load_firmware(struct octeon_device *oct)
1750{
1751        int ret = 0;
1752        const struct firmware *fw;
1753        char fw_name[LIO_MAX_FW_FILENAME_LEN];
1754        char *tmp_fw_type;
1755
1756        if (fw_type_is_auto()) {
1757                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1758                strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1759        } else {
1760                tmp_fw_type = fw_type;
1761        }
1762
1763        snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s", LIO_FW_DIR,
1764                 LIO_FW_BASE_NAME, octeon_get_conf(oct)->card_name,
1765                 tmp_fw_type, LIO_FW_NAME_SUFFIX);
1766
1767        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1768        if (ret) {
1769                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1770                        fw_name);
1771                release_firmware(fw);
1772                return ret;
1773        }
1774
1775        ret = octeon_download_firmware(oct, fw->data, fw->size);
1776
1777        release_firmware(fw);
1778
1779        return ret;
1780}
1781
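/* Example of the name built above: with the macro values from
 * liquidio_image.h (assumed here: LIO_FW_DIR "liquidio/",
 * LIO_FW_BASE_NAME "lio_", LIO_FW_NAME_SUFFIX ".bin") and a CN23XX card
 * ("23xx") running the default "nic" firmware, fw_name resolves to
 *
 *	liquidio/lio_23xx_nic.bin
 *
 * which request_firmware() then looks up under /lib/firmware/.
 */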
1782/**
1783 * \brief Poll routine for checking transmit queue status
1784 * @param work work_struct data structure
1785 */
1786static void octnet_poll_check_txq_status(struct work_struct *work)
1787{
1788        struct cavium_wk *wk = (struct cavium_wk *)work;
1789        struct lio *lio = (struct lio *)wk->ctxptr;
1790
1791        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1792                return;
1793
1794        check_txq_status(lio);
1795        queue_delayed_work(lio->txq_status_wq.wq,
1796                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1797}
1798
1799/**
1800 * \brief Sets up the txq poll check
1801 * @param netdev network device
1802 */
1803static inline int setup_tx_poll_fn(struct net_device *netdev)
1804{
1805        struct lio *lio = GET_LIO(netdev);
1806        struct octeon_device *oct = lio->oct_dev;
1807
1808        lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1809                                                WQ_MEM_RECLAIM, 0);
1810        if (!lio->txq_status_wq.wq) {
1811                dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1812                return -1;
1813        }
1814        INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1815                          octnet_poll_check_txq_status);
1816        lio->txq_status_wq.wk.ctxptr = lio;
1817        queue_delayed_work(lio->txq_status_wq.wq,
1818                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1819        return 0;
1820}
1821
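/* Note on the poll period used above: msecs_to_jiffies(1) rounds up to
 * at least one jiffy, so the worker actually reruns every 1/HZ seconds
 * (e.g. 4 ms with HZ=250, 10 ms with HZ=100), not a literal millisecond.
 */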
1822static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1823{
1824        struct lio *lio = GET_LIO(netdev);
1825
1826        if (lio->txq_status_wq.wq) {
1827                cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1828                destroy_workqueue(lio->txq_status_wq.wq);
1829        }
1830}
1831
1832/**
1833 * \brief Net device open for LiquidIO
1834 * @param netdev network device
1835 */
1836static int liquidio_open(struct net_device *netdev)
1837{
1838        struct lio *lio = GET_LIO(netdev);
1839        struct octeon_device *oct = lio->oct_dev;
1840        struct napi_struct *napi, *n;
1841
1842        if (oct->props[lio->ifidx].napi_enabled == 0) {
1843                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1844                        napi_enable(napi);
1845
1846                oct->props[lio->ifidx].napi_enabled = 1;
1847
1848                if (OCTEON_CN23XX_PF(oct))
1849                        oct->droq[0]->ops.poll_mode = 1;
1850        }
1851
1852        if (oct->ptp_enable)
1853                oct_ptp_open(netdev);
1854
1855        ifstate_set(lio, LIO_IFSTATE_RUNNING);
1856
1857        /* The Tx poll worker is needed unless this is a 23xx PF using MSI-X */
1858        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
1859                if (setup_tx_poll_fn(netdev))
1860                        return -1;
1865
1866        netif_tx_start_all_queues(netdev);
1867
1868        /* Ready for link status updates */
1869        lio->intf_open = 1;
1870
1871        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1872
1873        /* tell Octeon to start forwarding packets to host */
1874        send_rx_ctrl_cmd(lio, 1);
1875
1876        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1877                 netdev->name);
1878
1879        return 0;
1880}
1881
1882/**
1883 * \brief Net device stop for LiquidIO
1884 * @param netdev network device
1885 */
1886static int liquidio_stop(struct net_device *netdev)
1887{
1888        struct lio *lio = GET_LIO(netdev);
1889        struct octeon_device *oct = lio->oct_dev;
1890        struct napi_struct *napi, *n;
1891
1892        ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1893
1894        /* Stop any link updates */
1895        lio->intf_open = 0;
1896
1897        stop_txqs(netdev);
1898
1899        /* Inform that netif carrier is down */
1900        netif_carrier_off(netdev);
1901        netif_tx_disable(netdev);
1902
1903        lio->linfo.link.s.link_up = 0;
1904        lio->link_changes++;
1905
1906        /* Tell Octeon that nic interface is down. */
1907        send_rx_ctrl_cmd(lio, 0);
1908
1909        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
1910                cleanup_tx_poll_fn(netdev);
1915
1916        if (lio->ptp_clock) {
1917                ptp_clock_unregister(lio->ptp_clock);
1918                lio->ptp_clock = NULL;
1919        }
1920
1921        /* Wait for any pending Rx descriptors */
1922        if (lio_wait_for_clean_oq(oct))
1923                netif_info(lio, rx_err, lio->netdev,
1924                           "Proceeding with stop interface after partial RX desc processing\n");
1925
1926        if (oct->props[lio->ifidx].napi_enabled == 1) {
1927                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1928                        napi_disable(napi);
1929
1930                oct->props[lio->ifidx].napi_enabled = 0;
1931
1932                if (OCTEON_CN23XX_PF(oct))
1933                        oct->droq[0]->ops.poll_mode = 0;
1934        }
1935
1936        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1937
1938        return 0;
1939}
1940
1941/**
1942 * \brief Converts a mask based on net device flags
1943 * @param netdev network device
1944 *
1945 * This routine generates an octnet_ifflags mask from the net device flags
1946 * received from the OS.
1947 */
1948static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1949{
1950        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1951
1952        if (netdev->flags & IFF_PROMISC)
1953                f |= OCTNET_IFFLAG_PROMISC;
1954
1955        if (netdev->flags & IFF_ALLMULTI)
1956                f |= OCTNET_IFFLAG_ALLMULTI;
1957
1958        if (netdev->flags & IFF_MULTICAST) {
1959                f |= OCTNET_IFFLAG_MULTICAST;
1960
1961                /* Accept all multicast addresses if there are more than we
1962                 * can handle
1963                 */
1964                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1965                        f |= OCTNET_IFFLAG_ALLMULTI;
1966        }
1967
1968        if (netdev->flags & IFF_BROADCAST)
1969                f |= OCTNET_IFFLAG_BROADCAST;
1970
1971        return f;
1972}
1973
1974/**
1975 * \brief Net device set_multicast_list
1976 * @param netdev network device
1977 */
1978static void liquidio_set_mcast_list(struct net_device *netdev)
1979{
1980        struct lio *lio = GET_LIO(netdev);
1981        struct octeon_device *oct = lio->oct_dev;
1982        struct octnic_ctrl_pkt nctrl;
1983        struct netdev_hw_addr *ha;
1984        u64 *mc;
1985        int ret;
1986        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1987
1988        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1989
1990        /* Create a ctrl pkt command to be sent to core app. */
1991        nctrl.ncmd.u64 = 0;
1992        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1993        nctrl.ncmd.s.param1 = get_new_flags(netdev);
1994        nctrl.ncmd.s.param2 = mc_count;
1995        nctrl.ncmd.s.more = mc_count;
1996        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1997        nctrl.netpndev = (u64)netdev;
1998        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1999
2000        /* copy all the addresses into the udd */
2001        mc = &nctrl.udd[0];
2002        netdev_for_each_mc_addr(ha, netdev) {
2003                *mc = 0;
2004                memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2005                /* no need to swap bytes */
2006
2007                if (++mc >= &nctrl.udd[mc_count])
2008                        break;
2009        }
2010
2011        /* This can be called from atomic context, so we must not sleep
2012         * waiting for a response; send the command and move on.
2013         */
2014        nctrl.wait_time = 0;
2015
2016        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2017        if (ret < 0) {
2018                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2019                        ret);
2020        }
2021}
2022
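/* Layout of each multicast entry copied above: every address occupies
 * its own 64-bit udd word, with the MAC in the low six bytes (byte
 * offset 2), matching the network-byte-order convention used for
 * hw_addr elsewhere in this file:
 *
 *	udd word bytes:  00 00 m0 m1 m2 m3 m4 m5   (m0..m5 = ha->addr[0..5])
 */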
2023/**
2024 * \brief Net device set_mac_address
2025 * @param netdev network device
2026 */
2027static int liquidio_set_mac(struct net_device *netdev, void *p)
2028{
2029        int ret = 0;
2030        struct lio *lio = GET_LIO(netdev);
2031        struct octeon_device *oct = lio->oct_dev;
2032        struct sockaddr *addr = (struct sockaddr *)p;
2033        struct octnic_ctrl_pkt nctrl;
2034
2035        if (!is_valid_ether_addr(addr->sa_data))
2036                return -EADDRNOTAVAIL;
2037
2038        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2039
2040        nctrl.ncmd.u64 = 0;
2041        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2042        nctrl.ncmd.s.param1 = 0;
2043        nctrl.ncmd.s.more = 1;
2044        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2045        nctrl.netpndev = (u64)netdev;
2046        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2047        nctrl.wait_time = 100;
2048
2049        nctrl.udd[0] = 0;
2050        /* The MAC Address is presented in network byte order. */
2051        memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2052
2053        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2054        if (ret < 0) {
2055                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2056                return -ENOMEM;
2057        }
2058        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2059        memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2060
2061        return 0;
2062}
2063
2064static void
2065liquidio_get_stats64(struct net_device *netdev,
2066                     struct rtnl_link_stats64 *lstats)
2067{
2068        struct lio *lio = GET_LIO(netdev);
2069        struct octeon_device *oct;
2070        u64 pkts = 0, drop = 0, bytes = 0;
2071        struct oct_droq_stats *oq_stats;
2072        struct oct_iq_stats *iq_stats;
2073        int i, iq_no, oq_no;
2074
2075        oct = lio->oct_dev;
2076
2077        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2078                return;
2079
2080        for (i = 0; i < oct->num_iqs; i++) {
2081                iq_no = lio->linfo.txpciq[i].s.q_no;
2082                iq_stats = &oct->instr_queue[iq_no]->stats;
2083                pkts += iq_stats->tx_done;
2084                drop += iq_stats->tx_dropped;
2085                bytes += iq_stats->tx_tot_bytes;
2086        }
2087
2088        lstats->tx_packets = pkts;
2089        lstats->tx_bytes = bytes;
2090        lstats->tx_dropped = drop;
2091
2092        pkts = 0;
2093        drop = 0;
2094        bytes = 0;
2095
2096        for (i = 0; i < oct->num_oqs; i++) {
2097                oq_no = lio->linfo.rxpciq[i].s.q_no;
2098                oq_stats = &oct->droq[oq_no]->stats;
2099                pkts += oq_stats->rx_pkts_received;
2100                drop += (oq_stats->rx_dropped +
2101                         oq_stats->dropped_nodispatch +
2102                         oq_stats->dropped_toomany +
2103                         oq_stats->dropped_nomem);
2104                bytes += oq_stats->rx_bytes_received;
2105        }
2106
2107        lstats->rx_bytes = bytes;
2108        lstats->rx_packets = pkts;
2109        lstats->rx_dropped = drop;
2110
2111        octnet_get_link_stats(netdev);
2112        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2113        lstats->collisions = oct->link_stats.fromhost.total_collisions;
2114
2115        /* detailed rx_errors: */
2116        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2117        /* received packet with CRC error */
2118        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2119        /* received frame alignment error */
2120        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2121        /* receiver FIFO overrun */
2122        lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2123
2124        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2125                lstats->rx_frame_errors + lstats->rx_fifo_errors;
2126
2127        /* detailed tx_errors */
2128        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2129        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2130        lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2131
2132        lstats->tx_errors = lstats->tx_aborted_errors +
2133                lstats->tx_carrier_errors +
2134                lstats->tx_fifo_errors;
2135}
2136
2137/**
2138 * \brief Handler for SIOCSHWTSTAMP ioctl
2139 * @param netdev network device
2140 * @param ifr interface request
2142 */
2143static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2144{
2145        struct hwtstamp_config conf;
2146        struct lio *lio = GET_LIO(netdev);
2147
2148        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2149                return -EFAULT;
2150
2151        if (conf.flags)
2152                return -EINVAL;
2153
2154        switch (conf.tx_type) {
2155        case HWTSTAMP_TX_ON:
2156        case HWTSTAMP_TX_OFF:
2157                break;
2158        default:
2159                return -ERANGE;
2160        }
2161
2162        switch (conf.rx_filter) {
2163        case HWTSTAMP_FILTER_NONE:
2164                break;
2165        case HWTSTAMP_FILTER_ALL:
2166        case HWTSTAMP_FILTER_SOME:
2167        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2168        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2169        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2170        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2171        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2172        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2173        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2174        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2175        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2176        case HWTSTAMP_FILTER_PTP_V2_EVENT:
2177        case HWTSTAMP_FILTER_PTP_V2_SYNC:
2178        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2179        case HWTSTAMP_FILTER_NTP_ALL:
2180                conf.rx_filter = HWTSTAMP_FILTER_ALL;
2181                break;
2182        default:
2183                return -ERANGE;
2184        }
2185
2186        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2187                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2189        else
2190                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2191
2192        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2193}
2194
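/* The handler above implements the standard SIOCSHWTSTAMP contract, so
 * it can be exercised from user space in the usual way. A minimal sketch
 * using the generic linux/net_tstamp.h API (not driver-specific code;
 * "eth0" and sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reports the filter actually applied; this
 * driver coarsens every supported PTP filter to HWTSTAMP_FILTER_ALL.
 */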
2195/**
2196 * \brief ioctl handler
2197 * @param netdev network device
2198 * @param ifr interface request
2199 * @param cmd command
2200 */
2201static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2202{
2203        struct lio *lio = GET_LIO(netdev);
2204
2205        switch (cmd) {
2206        case SIOCSHWTSTAMP:
2207                if (lio->oct_dev->ptp_enable)
2208                        return hwtstamp_ioctl(netdev, ifr);
                /* fall through when PTP is not enabled */
2209        default:
2210                return -EOPNOTSUPP;
2211        }
2212}
2213
2214/**
2215 * \brief handle a Tx timestamp response
 * @param oct octeon device
2216 * @param status response status
2217 * @param buf pointer to skb
2218 */
2219static void handle_timestamp(struct octeon_device *oct,
2220                             u32 status,
2221                             void *buf)
2222{
2223        struct octnet_buf_free_info *finfo;
2224        struct octeon_soft_command *sc;
2225        struct oct_timestamp_resp *resp;
2226        struct lio *lio;
2227        struct sk_buff *skb = (struct sk_buff *)buf;
2228
2229        finfo = (struct octnet_buf_free_info *)skb->cb;
2230        lio = finfo->lio;
2231        sc = finfo->sc;
2232        oct = lio->oct_dev;
2233        resp = (struct oct_timestamp_resp *)sc->virtrptr;
2234
2235        if (status != OCTEON_REQUEST_DONE) {
2236                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2237                        CVM_CAST64(status));
2238                resp->timestamp = 0;
2239        }
2240
2241        octeon_swap_8B_data(&resp->timestamp, 1);
2242
2243        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2244                struct skb_shared_hwtstamps ts;
2245                u64 ns = resp->timestamp;
2246
2247                netif_info(lio, tx_done, lio->netdev,
2248                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2249                           skb, (unsigned long long)ns);
2250                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2251                skb_tstamp_tx(skb, &ts);
2252        }
2253
2254        octeon_free_soft_command(oct, sc);
2255        tx_buffer_free(skb);
2256}
2257
2258/** \brief Send a data packet that will be timestamped
2259 * @param oct octeon device
2260 * @param ndata pointer to network data
2261 * @param finfo pointer to private network data
 * @param xmit_more more packets are queued behind this one
2262 */
2263static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2264                                         struct octnic_data_pkt *ndata,
2265                                         struct octnet_buf_free_info *finfo,
2266                                         int xmit_more)
2267{
2268        int retval;
2269        struct octeon_soft_command *sc;
2270        struct lio *lio;
2271        int ring_doorbell;
2272        u32 len;
2273
2274        lio = finfo->lio;
2275
2276        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2277                                            sizeof(struct oct_timestamp_resp));
2278        if (!sc) {
2279                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2280                return IQ_SEND_FAILED;
2281        }
2282
2283        finfo->sc = sc;
2284
2285        if (ndata->reqtype == REQTYPE_NORESP_NET)
2286                ndata->reqtype = REQTYPE_RESP_NET;
2287        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2288                ndata->reqtype = REQTYPE_RESP_NET_SG;
2289
2290        sc->callback = handle_timestamp;
2291        sc->callback_arg = finfo->skb;
2292        sc->iq_no = ndata->q_no;
2293
2294        if (OCTEON_CN23XX_PF(oct))
2295                len = (u32)((struct octeon_instr_ih3 *)
2296                            (&sc->cmd.cmd3.ih3))->dlengsz;
2297        else
2298                len = (u32)((struct octeon_instr_ih2 *)
2299                            (&sc->cmd.cmd2.ih2))->dlengsz;
2300
2301        ring_doorbell = !xmit_more;
2302
2303        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2304                                     sc, len, ndata->reqtype);
2305
2306        if (retval == IQ_SEND_FAILED) {
2307                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2308                        retval);
2309                octeon_free_soft_command(oct, sc);
2310        } else {
2311                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2312        }
2313
2314        return retval;
2315}
2316
2317/** \brief Transmit network packets to the Octeon interface
2318 * @param skb      skbuff struct to be passed to network layer.
2319 * @param netdev   pointer to network device
2320 * @returns whether the packet was transmitted to the device okay or not
2321 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2322 */
2323static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2324{
2325        struct lio *lio;
2326        struct octnet_buf_free_info *finfo;
2327        union octnic_cmd_setup cmdsetup;
2328        struct octnic_data_pkt ndata;
2329        struct octeon_device *oct;
2330        struct oct_iq_stats *stats;
2331        struct octeon_instr_irh *irh;
2332        union tx_info *tx_info;
2333        int status = 0;
2334        int q_idx = 0, iq_no = 0;
2335        int j, xmit_more = 0;
2336        u64 dptr = 0;
2337        u32 tag = 0;
2338
2339        lio = GET_LIO(netdev);
2340        oct = lio->oct_dev;
2341
2342        q_idx = skb_iq(oct, skb);
2343        tag = q_idx;
2344        iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2345
2346        stats = &oct->instr_queue[iq_no]->stats;
2347
2348        /* Check for all conditions in which the current packet cannot be
2349         * transmitted.
2350         */
2351        if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2352            (!lio->linfo.link.s.link_up) ||
2353            (skb->len <= 0)) {
2354                netif_info(lio, tx_err, lio->netdev,
2355                           "Transmit failed link_status : %d\n",
2356                           lio->linfo.link.s.link_up);
2357                goto lio_xmit_failed;
2358        }
2359
2360        /* Use space in skb->cb to store info used to unmap and
2361         * free the buffers.
2362         */
2363        finfo = (struct octnet_buf_free_info *)skb->cb;
2364        finfo->lio = lio;
2365        finfo->skb = skb;
2366        finfo->sc = NULL;
2367
2368        /* Prepare the attributes for the data to be passed to OSI. */
2369        memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2370
2371        ndata.buf = (void *)finfo;
2372
2373        ndata.q_no = iq_no;
2374
2375        if (octnet_iq_is_full(oct, ndata.q_no)) {
2376                /* defer sending if queue is full */
2377                netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2378                           ndata.q_no);
2379                stats->tx_iq_busy++;
2380                return NETDEV_TX_BUSY;
2381        }
2382
2383        /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2384         *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2385         */
2386
2387        ndata.datasize = skb->len;
2388
2389        cmdsetup.u64 = 0;
2390        cmdsetup.s.iq_no = iq_no;
2391
2392        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2393                if (skb->encapsulation) {
2394                        cmdsetup.s.tnl_csum = 1;
2395                        stats->tx_vxlan++;
2396                } else {
2397                        cmdsetup.s.transport_csum = 1;
2398                }
2399        }
2400        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2401                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2402                cmdsetup.s.timestamp = 1;
2403        }
2404
2405        if (skb_shinfo(skb)->nr_frags == 0) {
2406                cmdsetup.s.u.datasize = skb->len;
2407                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2408
2410                /* Map the linear skb data for DMA to the device */
2410                dptr = dma_map_single(&oct->pci_dev->dev,
2411                                      skb->data,
2412                                      skb->len,
2413                                      DMA_TO_DEVICE);
2414                if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2415                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2416                                __func__);
2417                        stats->tx_dmamap_fail++;
2418                        return NETDEV_TX_BUSY;
2419                }
2420
2421                if (OCTEON_CN23XX_PF(oct))
2422                        ndata.cmd.cmd3.dptr = dptr;
2423                else
2424                        ndata.cmd.cmd2.dptr = dptr;
2425                finfo->dptr = dptr;
2426                ndata.reqtype = REQTYPE_NORESP_NET;
2427
2428        } else {
2429                int i, frags;
2430                struct skb_frag_struct *frag;
2431                struct octnic_gather *g;
2432
2433                spin_lock(&lio->glist_lock[q_idx]);
2434                g = (struct octnic_gather *)
2435                        lio_list_delete_head(&lio->glist[q_idx]);
2436                spin_unlock(&lio->glist_lock[q_idx]);
2437
2438                if (!g) {
2439                        netif_info(lio, tx_err, lio->netdev,
2440                                   "Transmit scatter gather: glist null!\n");
2441                        goto lio_xmit_failed;
2442                }
2443
2444                cmdsetup.s.gather = 1;
2445                cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2446                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2447
2448                memset(g->sg, 0, g->sg_size);
2449
2450                g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2451                                                 skb->data,
2452                                                 (skb->len - skb->data_len),
2453                                                 DMA_TO_DEVICE);
2454                if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2455                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2456                                __func__);
2457                        stats->tx_dmamap_fail++;
2458                        return NETDEV_TX_BUSY;
2459                }
2460                add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2461
2462                frags = skb_shinfo(skb)->nr_frags;
2463                i = 1;
2464                while (frags--) {
2465                        frag = &skb_shinfo(skb)->frags[i - 1];
2466
2467                        g->sg[(i >> 2)].ptr[(i & 3)] =
2468                                dma_map_page(&oct->pci_dev->dev,
2469                                             frag->page.p,
2470                                             frag->page_offset,
2471                                             frag->size,
2472                                             DMA_TO_DEVICE);
2473
2474                        if (dma_mapping_error(&oct->pci_dev->dev,
2475                                              g->sg[i >> 2].ptr[i & 3])) {
2476                                dma_unmap_single(&oct->pci_dev->dev,
2477                                                 g->sg[0].ptr[0],
2478                                                 skb->len - skb->data_len,
2479                                                 DMA_TO_DEVICE);
2480                                for (j = 1; j < i; j++) {
2481                                        frag = &skb_shinfo(skb)->frags[j - 1];
2482                                        dma_unmap_page(&oct->pci_dev->dev,
2483                                                       g->sg[j >> 2].ptr[j & 3],
2484                                                       frag->size,
2485                                                       DMA_TO_DEVICE);
2486                                }
2487                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2488                                        __func__);
2489                                return NETDEV_TX_BUSY;
2490                        }
2491
2492                        add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2493                        i++;
2494                }
2495
2496                dptr = g->sg_dma_ptr;
2497
2498                if (OCTEON_CN23XX_PF(oct))
2499                        ndata.cmd.cmd3.dptr = dptr;
2500                else
2501                        ndata.cmd.cmd2.dptr = dptr;
2502                finfo->dptr = dptr;
2503                finfo->g = g;
2504
2505                ndata.reqtype = REQTYPE_NORESP_NET_SG;
2506        }
2507
2508        if (OCTEON_CN23XX_PF(oct)) {
2509                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2510                tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2511        } else {
2512                irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2513                tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2514        }
2515
2516        if (skb_shinfo(skb)->gso_size) {
2517                tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2518                tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2519                stats->tx_gso++;
2520        }
2521
2522        /* HW insert VLAN tag */
2523        if (skb_vlan_tag_present(skb)) {
2524                irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
2525                irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
2526        }
2527
2528        xmit_more = skb->xmit_more;
2529
2530        if (unlikely(cmdsetup.s.timestamp))
2531                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2532        else
2533                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2534        if (status == IQ_SEND_FAILED)
2535                goto lio_xmit_failed;
2536
2537        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2538
2539        if (status == IQ_SEND_STOP)
2540                netif_stop_subqueue(netdev, q_idx);
2541
2542        netif_trans_update(netdev);
2543
2544        if (tx_info->s.gso_segs)
2545                stats->tx_done += tx_info->s.gso_segs;
2546        else
2547                stats->tx_done++;
2548        stats->tx_tot_bytes += ndata.datasize;
2549
2550        return NETDEV_TX_OK;
2551
2552lio_xmit_failed:
2553        stats->tx_dropped++;
2554        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2555                   iq_no, stats->tx_dropped);
2556        if (dptr)
2557                dma_unmap_single(&oct->pci_dev->dev, dptr,
2558                                 ndata.datasize, DMA_TO_DEVICE);
2559
2560        octeon_ring_doorbell_locked(oct, iq_no);
2561
2562        tx_buffer_free(skb);
2563        return NETDEV_TX_OK;
2564}
2565
2566/** \brief Network device Tx timeout
2567 * @param netdev    pointer to network device
2568 */
2569static void liquidio_tx_timeout(struct net_device *netdev)
2570{
2571        struct lio *lio;
2572
2573        lio = GET_LIO(netdev);
2574
2575        netif_info(lio, tx_err, lio->netdev,
2576                   "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
2577                   netdev->stats.tx_dropped);
2578        netif_trans_update(netdev);
2579        wake_txqs(netdev);
2580}
2581
2582static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2583                                    __be16 proto __attribute__((unused)),
2584                                    u16 vid)
2585{
2586        struct lio *lio = GET_LIO(netdev);
2587        struct octeon_device *oct = lio->oct_dev;
2588        struct octnic_ctrl_pkt nctrl;
2589        int ret = 0;
2590
2591        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2592
2593        nctrl.ncmd.u64 = 0;
2594        nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2595        nctrl.ncmd.s.param1 = vid;
2596        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2597        nctrl.wait_time = 100;
2598        nctrl.netpndev = (u64)netdev;
2599        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2600
2601        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2602        if (ret < 0) {
2603                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2604                        ret);
2605        }
2606
2607        return ret;
2608}
2609
2610static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2611                                     __be16 proto __attribute__((unused)),
2612                                     u16 vid)
2613{
2614        struct lio *lio = GET_LIO(netdev);
2615        struct octeon_device *oct = lio->oct_dev;
2616        struct octnic_ctrl_pkt nctrl;
2617        int ret = 0;
2618
2619        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2620
2621        nctrl.ncmd.u64 = 0;
2622        nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2623        nctrl.ncmd.s.param1 = vid;
2624        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2625        nctrl.wait_time = 100;
2626        nctrl.netpndev = (u64)netdev;
2627        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2628
2629        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2630        if (ret < 0) {
2631                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2632                        ret);
2633        }
2634        return ret;
2635}
2636
2637/** Sending command to enable/disable RX checksum offload
2638 * @param netdev                pointer to network device
2639 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2640 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
2641 *                              OCTNET_CMD_RXCSUM_DISABLE
2642 * @returns                     SUCCESS or FAILURE
2643 */
2644static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2645                                       u8 rx_cmd)
2646{
2647        struct lio *lio = GET_LIO(netdev);
2648        struct octeon_device *oct = lio->oct_dev;
2649        struct octnic_ctrl_pkt nctrl;
2650        int ret = 0;
2651
2652        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2653
2654        nctrl.ncmd.u64 = 0;
2655        nctrl.ncmd.s.cmd = command;
2656        nctrl.ncmd.s.param1 = rx_cmd;
2657        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2658        nctrl.wait_time = 100;
2659        nctrl.netpndev = (u64)netdev;
2660        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2661
2662        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2663        if (ret < 0) {
2664                dev_err(&oct->pci_dev->dev,
2665                        "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2666                        ret);
2667        }
2668        return ret;
2669}
2670
2671/** Sending command to add/delete VxLAN UDP port to firmware
2672 * @param netdev                pointer to network device
2673 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2674 * @param vxlan_port            VxLAN port to be added or deleted
2675 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2676 *                              OCTNET_CMD_VXLAN_PORT_DEL
2677 * @returns                     SUCCESS or FAILURE
2678 */
2679static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2680                                       u16 vxlan_port, u8 vxlan_cmd_bit)
2681{
2682        struct lio *lio = GET_LIO(netdev);
2683        struct octeon_device *oct = lio->oct_dev;
2684        struct octnic_ctrl_pkt nctrl;
2685        int ret = 0;
2686
2687        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2688
2689        nctrl.ncmd.u64 = 0;
2690        nctrl.ncmd.s.cmd = command;
2691        nctrl.ncmd.s.more = vxlan_cmd_bit;
2692        nctrl.ncmd.s.param1 = vxlan_port;
2693        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2694        nctrl.wait_time = 100;
2695        nctrl.netpndev = (u64)netdev;
2696        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2697
2698        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2699        if (ret < 0) {
2700                dev_err(&oct->pci_dev->dev,
2701                        "VxLAN port add/delete failed in core (ret:0x%x)\n",
2702                        ret);
2703        }
2704        return ret;
2705}
2706
2707/** \brief Net device fix features
2708 * @param netdev  pointer to network device
2709 * @param request features requested
2710 * @returns updated features list
2711 */
2712static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2713                                               netdev_features_t request)
2714{
2715        struct lio *lio = netdev_priv(netdev);
2716
2717        if ((request & NETIF_F_RXCSUM) &&
2718            !(lio->dev_capability & NETIF_F_RXCSUM))
2719                request &= ~NETIF_F_RXCSUM;
2720
2721        if ((request & NETIF_F_HW_CSUM) &&
2722            !(lio->dev_capability & NETIF_F_HW_CSUM))
2723                request &= ~NETIF_F_HW_CSUM;
2724
2725        if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2726                request &= ~NETIF_F_TSO;
2727
2728        if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2729                request &= ~NETIF_F_TSO6;
2730
2731        if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2732                request &= ~NETIF_F_LRO;
2733
2734        /* Disable LRO if RXCSUM is off */
2735        if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2736            (lio->dev_capability & NETIF_F_LRO))
2737                request &= ~NETIF_F_LRO;
2738
2739        if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2740            !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2741                request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2742
2743        return request;
2744}
2745
2746/** \brief Net device set features
2747 * @param netdev  pointer to network device
2748 * @param features features to enable/disable
2749 */
2750static int liquidio_set_features(struct net_device *netdev,
2751                                 netdev_features_t features)
2752{
2753        struct lio *lio = netdev_priv(netdev);
2754
2755        if ((features & NETIF_F_LRO) &&
2756            (lio->dev_capability & NETIF_F_LRO) &&
2757            !(netdev->features & NETIF_F_LRO))
2758                liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2759                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2760        else if (!(features & NETIF_F_LRO) &&
2761                 (lio->dev_capability & NETIF_F_LRO) &&
2762                 (netdev->features & NETIF_F_LRO))
2763                liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2764                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2765
2766        /* Sending command to firmware to enable/disable RX checksum
2767         * offload settings using ethtool
2768         */
2769        if (!(netdev->features & NETIF_F_RXCSUM) &&
2770            (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2771            (features & NETIF_F_RXCSUM))
2772                liquidio_set_rxcsum_command(netdev,
2773                                            OCTNET_CMD_TNL_RX_CSUM_CTL,
2774                                            OCTNET_CMD_RXCSUM_ENABLE);
2775        else if ((netdev->features & NETIF_F_RXCSUM) &&
2776                 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2777                 !(features & NETIF_F_RXCSUM))
2778                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2779                                            OCTNET_CMD_RXCSUM_DISABLE);
2780
2781        if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2782            (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2783            !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2784                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2785                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
2786        else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2787                 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2788                 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2789                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2790                                     OCTNET_CMD_VLAN_FILTER_DISABLE);
2791
2792        return 0;
2793}
2794
2795static void liquidio_add_vxlan_port(struct net_device *netdev,
2796                                    struct udp_tunnel_info *ti)
2797{
2798        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2799                return;
2800
2801        liquidio_vxlan_port_command(netdev,
2802                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
2803                                    htons(ti->port),
2804                                    OCTNET_CMD_VXLAN_PORT_ADD);
2805}
2806
2807static void liquidio_del_vxlan_port(struct net_device *netdev,
2808                                    struct udp_tunnel_info *ti)
2809{
2810        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2811                return;
2812
2813        liquidio_vxlan_port_command(netdev,
2814                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
2815                                    htons(ti->port),
2816                                    OCTNET_CMD_VXLAN_PORT_DEL);
2817}
2818
2819static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2820                                 u8 *mac, bool is_admin_assigned)
2821{
2822        struct lio *lio = GET_LIO(netdev);
2823        struct octeon_device *oct = lio->oct_dev;
2824        struct octnic_ctrl_pkt nctrl;
2825
2826        if (!is_valid_ether_addr(mac))
2827                return -EINVAL;
2828
2829        if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2830                return -EINVAL;
2831
2832        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2833
2834        nctrl.ncmd.u64 = 0;
2835        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2836        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2837        nctrl.ncmd.s.param1 = vfidx + 1;
2838        nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
2839        nctrl.ncmd.s.more = 1;
2840        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2841        nctrl.netpndev = (u64)netdev;
2842        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2843        nctrl.wait_time = LIO_CMD_WAIT_TM;
2844
2845        nctrl.udd[0] = 0;
2846        /* The MAC Address is presented in network byte order. */
2847        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2848
2849        oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2850
2851        octnet_send_nic_ctrl_pkt(oct, &nctrl);
2852
2853        return 0;
2854}
2855
2856static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2857{
2858        struct lio *lio = GET_LIO(netdev);
2859        struct octeon_device *oct = lio->oct_dev;
2860        int retval;
2861
2862        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2863                return -EINVAL;
2864
2865        retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2866        if (!retval)
2867                cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2868
2869        return retval;
2870}
2871
2872static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2873                                u16 vlan, u8 qos, __be16 vlan_proto)
2874{
2875        struct lio *lio = GET_LIO(netdev);
2876        struct octeon_device *oct = lio->oct_dev;
2877        struct octnic_ctrl_pkt nctrl;
2878        u16 vlantci;
2879
2880        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2881                return -EINVAL;
2882
2883        if (vlan_proto != htons(ETH_P_8021Q))
2884                return -EPROTONOSUPPORT;
2885
2886        if (vlan >= VLAN_N_VID || qos > 7)
2887                return -EINVAL;
2888
2889        if (vlan)
2890                vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2891        else
2892                vlantci = 0;
2893
2894        if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2895                return 0;
2896
2897        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2898
2899        if (vlan)
2900                nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2901        else
2902                nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2903
2904        nctrl.ncmd.s.param1 = vlantci;
2905        nctrl.ncmd.s.param2 =
2906            vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2907        nctrl.ncmd.s.more = 0;
2908        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2909        nctrl.cb_fn = NULL;
2910        nctrl.wait_time = LIO_CMD_WAIT_TM;
2911
2912        octnet_send_nic_ctrl_pkt(oct, &nctrl);
2913
2914        oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2915
2916        return 0;
2917}
2918
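/* Worked example of the vlantci encoding above: for vlan = 100 and
 * qos = 3, with VLAN_PRIO_SHIFT == 13,
 *
 *	vlantci = 100 | (3 << 13) = 0x0064 | 0x6000 = 0x6064
 *
 * i.e. the standard 802.1Q TCI layout, PCP in bits 15:13 and the VID in
 * bits 11:0.
 */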
2919static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2920                                  struct ifla_vf_info *ivi)
2921{
2922        struct lio *lio = GET_LIO(netdev);
2923        struct octeon_device *oct = lio->oct_dev;
2924        u8 *macaddr;
2925
2926        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2927                return -EINVAL;
2928
2929        ivi->vf = vfidx;
2930        macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2931        ether_addr_copy(&ivi->mac[0], macaddr);
2932        ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2933        ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2934        if (oct->sriov_info.trusted_vf.active &&
2935            oct->sriov_info.trusted_vf.id == vfidx)
2936                ivi->trusted = true;
2937        else
2938                ivi->trusted = false;
2939        ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2940        return 0;
2941}
2942
2943static void trusted_vf_callback(struct octeon_device *oct_dev,
2944                                u32 status, void *ptr)
2945{
2946        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
2947        struct lio_trusted_vf_ctx *ctx;
2948
2949        ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2950        ctx->status = status;
2951
2952        complete(&ctx->complete);
2953}
2954
2955static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2956{
2957        struct octeon_device *oct = lio->oct_dev;
2958        struct lio_trusted_vf_ctx *ctx;
2959        struct octeon_soft_command *sc;
2960        int ctx_size, retval;
2961
2962        ctx_size = sizeof(struct lio_trusted_vf_ctx);
2963        sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
            if (!sc)
                    return -ENOMEM;
2964
2965        ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
2966        init_completion(&ctx->complete);
2967
2968        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2969
2970        /* vfidx is 0 based, but vf_num (param1) is 1 based */
2971        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
2972                                    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
2973                                    trusted);
2974
2975        sc->callback = trusted_vf_callback;
2976        sc->callback_arg = sc;
2977        sc->wait_time = 1000;
2978
2979        retval = octeon_send_soft_command(oct, sc);
2980        if (retval == IQ_SEND_FAILED) {
2981                retval = -1;
2982        } else {
2983                /* Wait for response or timeout */
2984                if (wait_for_completion_timeout(&ctx->complete,
2985                                                msecs_to_jiffies(2000)))
2986                        retval = ctx->status;
2987                else
2988                        retval = -1;
2989        }
2990
2991        octeon_free_soft_command(oct, sc);
2992
2993        return retval;
2994}
2995
2996static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
2997                                 bool setting)
2998{
2999        struct lio *lio = GET_LIO(netdev);
3000        struct octeon_device *oct = lio->oct_dev;
3001
3002        if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3003                /* trusted vf is not supported by firmware older than 1.7.1 */
3004                return -EOPNOTSUPP;
3005        }
3006
3007        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3008                netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3009                return -EINVAL;
3010        }
3011
3012        if (setting) {
3013                /* Set */
3014
3015                if (oct->sriov_info.trusted_vf.active &&
3016                    oct->sriov_info.trusted_vf.id == vfidx)
3017                        return 0;
3018
3019                if (oct->sriov_info.trusted_vf.active) {
3020                        netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3021                        return -EPERM;
3022                }
3023        } else {
3024                /* Clear */
3025
3026                if (!oct->sriov_info.trusted_vf.active)
3027                        return 0;
3028        }
3029
3030        if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3031                if (setting) {
3032                        oct->sriov_info.trusted_vf.id = vfidx;
3033                        oct->sriov_info.trusted_vf.active = true;
3034                } else {
3035                        oct->sriov_info.trusted_vf.active = false;
3036                }
3037
3038                netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3039                           setting ? "" : "not ");
3040        } else {
3041                netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3042                return -EIO;
3043        }
3044
3045        return 0;
3046}
3047
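    /* ndo_set_vf_link_state handler, e.g. (PF name is hypothetical):
     *
     *   ip link set dev eth0 vf 1 state disable
     *
     * The state is pushed to firmware and cached in vf_linkstate[].
     */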
3048static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3049                                      int linkstate)
3050{
3051        struct lio *lio = GET_LIO(netdev);
3052        struct octeon_device *oct = lio->oct_dev;
3053        struct octnic_ctrl_pkt nctrl;
            int ret;
3054
3055        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3056                return -EINVAL;
3057
3058        if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3059                return 0;
3060
3061        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3062        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3063        nctrl.ncmd.s.param1 =
3064            vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3065        nctrl.ncmd.s.param2 = linkstate;
3066        nctrl.ncmd.s.more = 0;
3067        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3068        nctrl.cb_fn = NULL;
3069        nctrl.wait_time = LIO_CMD_WAIT_TM;
3070
3071        ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
            if (ret < 0)
                    return -EIO;
3072
3073        oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3074
3075        return 0;
3076}
3077
3078static int
3079liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3080{
3081        struct lio_devlink_priv *priv;
3082        struct octeon_device *oct;
3083
3084        priv = devlink_priv(devlink);
3085        oct = priv->oct;
3086
3087        *mode = oct->eswitch_mode;
3088
3089        return 0;
3090}
3091
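    /* devlink eswitch-mode handler: switchdev mode creates VF representor
     * netdevs, legacy mode destroys them, e.g. (PCI address hypothetical):
     *
     *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
     */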
3092static int
3093liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3094{
3095        struct lio_devlink_priv *priv;
3096        struct octeon_device *oct;
3097        int ret = 0;
3098
3099        priv = devlink_priv(devlink);
3100        oct = priv->oct;
3101
3102        if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3103                return -EINVAL;
3104
3105        if (oct->eswitch_mode == mode)
3106                return 0;
3107
3108        switch (mode) {
3109        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3110                oct->eswitch_mode = mode;
3111                ret = lio_vf_rep_create(oct);
3112                break;
3113
3114        case DEVLINK_ESWITCH_MODE_LEGACY:
3115                lio_vf_rep_destroy(oct);
3116                oct->eswitch_mode = mode;
3117                break;
3118
3119        default:
3120                ret = -EINVAL;
3121        }
3122
3123        return ret;
3124}
3125
3126static const struct devlink_ops liquidio_devlink_ops = {
3127        .eswitch_mode_get = liquidio_eswitch_mode_get,
3128        .eswitch_mode_set = liquidio_eswitch_mode_set,
3129};
3130
3131static int
3132lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3133{
3134        struct lio *lio = GET_LIO(dev);
3135        struct octeon_device *oct = lio->oct_dev;
3136
3137        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3138                return -EOPNOTSUPP;
3139
3140        switch (attr->id) {
3141        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3142                attr->u.ppid.id_len = ETH_ALEN;
3143                ether_addr_copy(attr->u.ppid.id,
3144                                (void *)&lio->linfo.hw_addr + 2);
3145                break;
3146
3147        default:
3148                return -EOPNOTSUPP;
3149        }
3150
3151        return 0;
3152}
3153
3154static const struct switchdev_ops lio_pf_switchdev_ops = {
3155        .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3156};
3157
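    /* ndo_get_vf_stats handler: per-VF counters are fetched from firmware
     * via cn23xx_get_vf_stats() and copied into the ifla_vf_stats reply
     * that "ip link show" renders.
     */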
3158static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3159                                 struct ifla_vf_stats *vf_stats)
3160{
3161        struct lio *lio = GET_LIO(netdev);
3162        struct octeon_device *oct = lio->oct_dev;
3163        struct oct_vf_stats stats;
3164        int ret;
3165
3166        if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3167                return -EINVAL;
3168
3169        memset(&stats, 0, sizeof(struct oct_vf_stats));
3170        ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3171        if (!ret) {
3172                vf_stats->rx_packets = stats.rx_packets;
3173                vf_stats->tx_packets = stats.tx_packets;
3174                vf_stats->rx_bytes = stats.rx_bytes;
3175                vf_stats->tx_bytes = stats.tx_bytes;
3176                vf_stats->broadcast = stats.broadcast;
3177                vf_stats->multicast = stats.multicast;
3178        }
3179
3180        return ret;
3181}
3182
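    /* This tree extends net_device_ops via .ndo_size plus an .extended
     * sub-struct (a kABI-preserving layout); mainline assigns the same
     * handlers to plain ndo_* fields.
     */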
3183static const struct net_device_ops lionetdevops = {
3184        .ndo_size               = sizeof(struct net_device_ops),
3185        .ndo_open               = liquidio_open,
3186        .ndo_stop               = liquidio_stop,
3187        .ndo_start_xmit         = liquidio_xmit,
3188        .ndo_get_stats64        = liquidio_get_stats64,
3189        .ndo_set_mac_address    = liquidio_set_mac,
3190        .ndo_set_rx_mode        = liquidio_set_mcast_list,
3191        .ndo_tx_timeout         = liquidio_tx_timeout,
3192
3193        .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3194        .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3195        .extended.ndo_change_mtu        = liquidio_change_mtu,
3196        .ndo_do_ioctl           = liquidio_ioctl,
3197        .ndo_fix_features       = liquidio_fix_features,
3198        .ndo_set_features       = liquidio_set_features,
3199        .extended.ndo_udp_tunnel_add    = liquidio_add_vxlan_port,
3200        .extended.ndo_udp_tunnel_del    = liquidio_del_vxlan_port,
3201        .ndo_set_vf_mac         = liquidio_set_vf_mac,
3202        .extended.ndo_set_vf_vlan       = liquidio_set_vf_vlan,
3203        .ndo_get_vf_config      = liquidio_get_vf_config,
3204        .extended.ndo_set_vf_trust      = liquidio_set_vf_trust,
3205        .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3206        .ndo_get_vf_stats       = liquidio_get_vf_stats,
3207};
3208
3209/** \brief Entry point for the liquidio module
3210 */
3211static int __init liquidio_init(void)
3212{
3213        int i;
3214        struct handshake *hs;
3215
3216        init_completion(&first_stage);
3217
3218        octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3219
3220        if (liquidio_init_pci())
3221                return -EINVAL;
3222
3223        wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3224
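        /* Handshake phase 1: wait for every probed device to finish
         * octeon_device_init().  Phase 2 below then waits (30s per device)
         * for the firmware to report in via nic_starter().
         */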
3225        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3226                hs = &handshake[i];
3227                if (hs->pci_dev) {
3228                        wait_for_completion(&hs->init);
3229                        if (!hs->init_ok) {
3230                                /* init handshake failed */
3231                                dev_err(&hs->pci_dev->dev,
3232                                        "Failed to init device\n");
3233                                liquidio_deinit_pci();
3234                                return -EIO;
3235                        }
3236                }
3237        }
3238
3239        for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3240                hs = &handshake[i];
3241                if (hs->pci_dev) {
3242                        wait_for_completion_timeout(&hs->started,
3243                                                    msecs_to_jiffies(30000));
3244                        if (!hs->started_ok) {
3245                                /* starter handshake failed */
3246                                dev_err(&hs->pci_dev->dev,
3247                                        "Firmware failed to start\n");
3248                                liquidio_deinit_pci();
3249                                return -EIO;
3250                        }
3251                }
3252        }
3253
3254        return 0;
3255}
3256
3257static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3258{
3259        struct octeon_device *oct = (struct octeon_device *)buf;
3260        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3261        int gmxport = 0;
3262        union oct_link_status *ls;
3263        int i;
3264
3265        if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3266                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3267                        recv_pkt->buffer_size[0],
3268                        recv_pkt->rh.r_nic_info.gmxport);
3269                goto nic_info_err;
3270        }
3271
3272        gmxport = recv_pkt->rh.r_nic_info.gmxport;
3273        ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3274                OCT_DROQ_INFO_SIZE);
3275
3276        octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3277        for (i = 0; i < oct->ifcount; i++) {
3278                if (oct->props[i].gmxport == gmxport) {
3279                        update_link_status(oct->props[i].netdev, ls);
3280                        break;
3281                }
3282        }
3283
3284nic_info_err:
3285        for (i = 0; i < recv_pkt->buffer_count; i++)
3286                recv_buffer_free(recv_pkt->buffer_ptr[i]);
3287        octeon_free_recv_info(recv_info);
3288        return 0;
3289}
3290
3291/**
3292 * \brief Setup network interfaces
3293 * @param octeon_dev  octeon device
3294 *
3295 * Called during init time for each device. It assumes the NIC
3296 * is already up and running.  The link information for each
3297 * interface is passed in link_info.
3298 */
3299static int setup_nic_devices(struct octeon_device *octeon_dev)
3300{
3301        struct lio *lio = NULL;
3302        struct net_device *netdev;
3303        u8 mac[ETH_ALEN], i, j, *fw_ver;
3304        struct octeon_soft_command *sc;
3305        struct liquidio_if_cfg_context *ctx;
3306        struct liquidio_if_cfg_resp *resp;
3307        struct octdev_props *props;
3308        int retval, num_iqueues, num_oqueues;
3309        int max_num_queues = 0;
3310        union oct_nic_if_cfg if_cfg;
3311        unsigned int base_queue;
3312        unsigned int gmx_port_id;
3313        u32 resp_size, ctx_size, data_size;
3314        u32 ifidx_or_pfnum;
3315        struct lio_version *vdata;
3316        struct devlink *devlink;
3317        struct lio_devlink_priv *lio_devlink;
3318
3319        /* This is to handle link status changes */
3320        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3321                                    OPCODE_NIC_INFO,
3322                                    lio_nic_info, octeon_dev);
3323
3324        /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3325         * They are handled directly.
3326         */
3327        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3328                                        free_netbuf);
3329
3330        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3331                                        free_netsgbuf);
3332
3333        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3334                                        free_netsgbuf_with_resp);
3335
3336        for (i = 0; i < octeon_dev->ifcount; i++) {
3337                resp_size = sizeof(struct liquidio_if_cfg_resp);
3338                ctx_size = sizeof(struct liquidio_if_cfg_context);
3339                data_size = sizeof(struct lio_version);
3340                sc = (struct octeon_soft_command *)
3341                        octeon_alloc_soft_command(octeon_dev, data_size,
3342                                                  resp_size, ctx_size);
                    if (!sc) {
                            dev_err(&octeon_dev->pci_dev->dev,
                                    "Soft command allocation failed\n");
                            goto setup_nic_wait_intr;
                    }
3343                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3344                ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3345                vdata = (struct lio_version *)sc->virtdptr;
3346
3347                *((u64 *)vdata) = 0;
3348                vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3349                vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3350                vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3351
3352                if (OCTEON_CN23XX_PF(octeon_dev)) {
3353                        num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3354                        num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3355                        base_queue = octeon_dev->sriov_info.pf_srn;
3356
3357                        gmx_port_id = octeon_dev->pf_num;
3358                        ifidx_or_pfnum = octeon_dev->pf_num;
3359                } else {
3360                        num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3361                                                octeon_get_conf(octeon_dev), i);
3362                        num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3363                                                octeon_get_conf(octeon_dev), i);
3364                        base_queue = CFG_GET_BASE_QUE_NIC_IF(
3365                                                octeon_get_conf(octeon_dev), i);
3366                        gmx_port_id = CFG_GET_GMXID_NIC_IF(
3367                                                octeon_get_conf(octeon_dev), i);
3368                        ifidx_or_pfnum = i;
3369                }
3370
3371                dev_dbg(&octeon_dev->pci_dev->dev,
3372                        "requesting config for interface %d, iqs %d, oqs %d\n",
3373                        ifidx_or_pfnum, num_iqueues, num_oqueues);
3374                WRITE_ONCE(ctx->cond, 0);
3375                ctx->octeon_id = lio_get_device_id(octeon_dev);
3376                init_waitqueue_head(&ctx->wc);
3377
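                /* Pack the requested queue layout into the 64-bit if_cfg
                 * word carried by the IF_CFG command below.
                 */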
3378                if_cfg.u64 = 0;
3379                if_cfg.s.num_iqueues = num_iqueues;
3380                if_cfg.s.num_oqueues = num_oqueues;
3381                if_cfg.s.base_queue = base_queue;
3382                if_cfg.s.gmx_port_id = gmx_port_id;
3383
3384                sc->iq_no = 0;
3385
3386                octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3387                                            OPCODE_NIC_IF_CFG, 0,
3388                                            if_cfg.u64, 0);
3389
3390                sc->callback = lio_if_cfg_callback;
3391                sc->callback_arg = sc;
3392                sc->wait_time = LIO_IFCFG_WAIT_TIME;
3393
3394                retval = octeon_send_soft_command(octeon_dev, sc);
3395                if (retval == IQ_SEND_FAILED) {
3396                        dev_err(&octeon_dev->pci_dev->dev,
3397                                "iq/oq config failed status: %x\n",
3398                                retval);
3399                        /* Soft instr is freed by driver in case of failure. */
3400                        goto setup_nic_dev_fail;
3401                }
3402
3403                /* Sleep on a wait queue till the cond flag indicates that the
3404                 * response arrived or timed-out.
3405                 */
3406                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3407                        dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
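                            /* sc is deliberately not freed on this path;
                             * the hardware may still DMA a late response
                             * into its buffers.
                             */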
3408                        goto setup_nic_wait_intr;
3409                }
3410
3411                retval = resp->status;
3412                if (retval) {
3413                        dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3414                        goto setup_nic_dev_fail;
3415                }
3416
3417                /* Verify f/w version (in case of 'auto' loading from flash) */
3418                fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3419                if (memcmp(LIQUIDIO_BASE_VERSION,
3420                           fw_ver,
3421                           strlen(LIQUIDIO_BASE_VERSION))) {
3422                        dev_err(&octeon_dev->pci_dev->dev,
3423                                "Unmatched firmware version. Expected %s.x, got %s.\n",
3424                                LIQUIDIO_BASE_VERSION, fw_ver);
3425                        goto setup_nic_dev_fail;
3426                } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3427                           FW_IS_PRELOADED) {
3428                        dev_info(&octeon_dev->pci_dev->dev,
3429                                 "Using auto-loaded firmware version %s.\n",
3430                                 fw_ver);
3431                }
3432
3433                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3434                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
3435
3436                num_iqueues = hweight64(resp->cfg_info.iqmask);
3437                num_oqueues = hweight64(resp->cfg_info.oqmask);
3438
3439                if (!num_iqueues || !num_oqueues) {
3440                        dev_err(&octeon_dev->pci_dev->dev,
3441                                "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3442                                resp->cfg_info.iqmask,
3443                                resp->cfg_info.oqmask);
3444                        goto setup_nic_dev_fail;
3445                }
3446
3447                if (OCTEON_CN6XXX(octeon_dev)) {
3448                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3449                                                                    cn6xxx));
3450                } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3451                        max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3452                                                                    cn23xx_pf));
3453                }
3454
3455                dev_dbg(&octeon_dev->pci_dev->dev,
3456                        "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3457                        i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3458                        num_iqueues, num_oqueues, max_num_queues);
3459                netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3460
3461                if (!netdev) {
3462                        dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3463                        goto setup_nic_dev_fail;
3464                }
3465
3466                SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3467
3468                /* Associate the routines that will handle different
3469                 * netdev tasks.
3470                 */
3471                netdev->netdev_ops = &lionetdevops;
3472                SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3473
3474                retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3475                if (retval) {
3476                        dev_err(&octeon_dev->pci_dev->dev,
3477                                "setting real number rx failed\n");
3478                        goto setup_nic_dev_fail;
3479                }
3480
3481                retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3482                if (retval) {
3483                        dev_err(&octeon_dev->pci_dev->dev,
3484                                "setting real number tx failed\n");
3485                        goto setup_nic_dev_fail;
3486                }
3487
3488                lio = GET_LIO(netdev);
3489
3490                memset(lio, 0, sizeof(struct lio));
3491
3492                lio->ifidx = ifidx_or_pfnum;
3493
3494                props = &octeon_dev->props[i];
3495                props->gmxport = resp->cfg_info.linfo.gmxport;
3496                props->netdev = netdev;
3497
3498                lio->linfo.num_rxpciq = num_oqueues;
3499                lio->linfo.num_txpciq = num_iqueues;
3500                for (j = 0; j < num_oqueues; j++) {
3501                        lio->linfo.rxpciq[j].u64 =
3502                                resp->cfg_info.linfo.rxpciq[j].u64;
3503                }
3504                for (j = 0; j < num_iqueues; j++) {
3505                        lio->linfo.txpciq[j].u64 =
3506                                resp->cfg_info.linfo.txpciq[j].u64;
3507                }
3508                lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3509                lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3510                lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3511
3512                lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3513
3514                if (OCTEON_CN23XX_PF(octeon_dev) ||
3515                    OCTEON_CN6XXX(octeon_dev)) {
3516                        lio->dev_capability = NETIF_F_HIGHDMA
3517                                              | NETIF_F_IP_CSUM
3518                                              | NETIF_F_IPV6_CSUM
3519                                              | NETIF_F_SG | NETIF_F_RXCSUM
3520                                              | NETIF_F_GRO
3521                                              | NETIF_F_TSO | NETIF_F_TSO6
3522                                              | NETIF_F_LRO;
3523                }
3524                netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3525
3526                /*  Copy of transmit encapsulation capabilities:
3527                 *  TSO, TSO6, Checksums for this device
3528                 */
3529                lio->enc_dev_capability = NETIF_F_IP_CSUM
3530                                          | NETIF_F_IPV6_CSUM
3531                                          | NETIF_F_GSO_UDP_TUNNEL
3532                                          | NETIF_F_HW_CSUM | NETIF_F_SG
3533                                          | NETIF_F_RXCSUM
3534                                          | NETIF_F_TSO | NETIF_F_TSO6
3535                                          | NETIF_F_LRO;
3536
3537                netdev->hw_enc_features = (lio->enc_dev_capability &
3538                                           ~NETIF_F_LRO);
3539
3540                lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3541
3542                netdev->vlan_features = lio->dev_capability;
3543                /* Add any unchangeable hw features */
3544                lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3545                                        NETIF_F_HW_VLAN_CTAG_RX |
3546                                        NETIF_F_HW_VLAN_CTAG_TX;
3547
3548                netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3549
3550                netdev->hw_features = lio->dev_capability;
3551                /* HW_VLAN_CTAG_RX and HW_VLAN_CTAG_FILTER are always on */
3552                netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3554
3555                /* MTU range: 68 - 16000 */
3556                netdev->extended->min_mtu = LIO_MIN_MTU_SIZE;
3557                netdev->extended->max_mtu = LIO_MAX_MTU_SIZE;
3558
3559                /* Point to the  properties for octeon device to which this
3560                 * interface belongs.
3561                 */
3562                lio->oct_dev = octeon_dev;
3563                lio->octprops = props;
3564                lio->netdev = netdev;
3565
3566                dev_dbg(&octeon_dev->pci_dev->dev,
3567                        "if%d gmx: %d hw_addr: 0x%llx\n", i,
3568                        lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3569
3570                for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3571                        u8 vfmac[ETH_ALEN];
3572
3573                        eth_random_addr(vfmac);
3574                        if (__liquidio_set_vf_mac(netdev, j,
3575                                                  vfmac, false)) {
3576                                dev_err(&octeon_dev->pci_dev->dev,
3577                                        "Error setting VF%d MAC address\n",
3578                                        j);
3579                                goto setup_nic_dev_fail;
3580                        }
3581                }
3582
3583                /* 64-bit swap required on LE machines */
3584                octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
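                /* After the swap, bytes 2..7 of hw_addr hold the MAC. */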
3585                for (j = 0; j < ETH_ALEN; j++)
3586                        mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3587
3588                /* Copy MAC Address to OS network device structure */
3589
3590                ether_addr_copy(netdev->dev_addr, mac);
3591
3592                /* By default all interfaces on a single Octeon use the same
3593                 * tx and rx queues
3594                 */
3595                lio->txq = lio->linfo.txpciq[0].s.q_no;
3596                lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3597                if (liquidio_setup_io_queues(octeon_dev, i,
3598                                             lio->linfo.num_txpciq,
3599                                             lio->linfo.num_rxpciq)) {
3600                        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3601                        goto setup_nic_dev_fail;
3602                }
3603
3604                ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3605
3606                lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3607                lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3608
3609                if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3610                        dev_err(&octeon_dev->pci_dev->dev,
3611                                "Gather list allocation failed\n");
3612                        goto setup_nic_dev_fail;
3613                }
3614
3615                /* Register ethtool support */
3616                liquidio_set_ethtool_ops(netdev);
3617                if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3618                        octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3619                else
3620                        octeon_dev->priv_flags = 0x0;
3621
3622                if (netdev->features & NETIF_F_LRO)
3623                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3624                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3625
3626                liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3627                                     OCTNET_CMD_VLAN_FILTER_ENABLE);
3628
3629                if ((debug != -1) && (debug & NETIF_MSG_HW))
3630                        liquidio_set_feature(netdev,
3631                                             OCTNET_CMD_VERBOSE_ENABLE, 0);
3632
3633                if (setup_link_status_change_wq(netdev))
3634                        goto setup_nic_dev_fail;
3635
3636                if ((octeon_dev->fw_info.app_cap_flags &
3637                     LIQUIDIO_TIME_SYNC_CAP) &&
3638                    setup_sync_octeon_time_wq(netdev))
3639                        goto setup_nic_dev_fail;
3640
3641                if (setup_rx_oom_poll_fn(netdev))
3642                        goto setup_nic_dev_fail;
3643
3644                /* Register the network device with the OS */
3645                if (register_netdev(netdev)) {
3646                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3647                        goto setup_nic_dev_fail;
3648                }
3649
3650                dev_dbg(&octeon_dev->pci_dev->dev,
3651                        "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3652                        i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3653                netif_carrier_off(netdev);
3654                lio->link_changes++;
3655
3656                ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3657
3658                /* Sending command to firmware to enable Rx checksum offload
3659                 * by default at the time of setup of Liquidio driver for
3660                 * this device
3661                 */
3662                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3663                                            OCTNET_CMD_RXCSUM_ENABLE);
3664                liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3665                                     OCTNET_CMD_TXCSUM_ENABLE);
3666
3667                dev_dbg(&octeon_dev->pci_dev->dev,
3668                        "NIC ifidx:%d Setup successful\n", i);
3669
3670                octeon_free_soft_command(octeon_dev, sc);
3671
3672                if (octeon_dev->subsystem_id ==
3673                        OCTEON_CN2350_25GB_SUBSYS_ID ||
3674                    octeon_dev->subsystem_id ==
3675                        OCTEON_CN2360_25GB_SUBSYS_ID) {
3676                        liquidio_get_speed(lio);
3677
3678                        if (octeon_dev->speed_setting == 0) {
3679                                octeon_dev->speed_setting = 25;
3680                                octeon_dev->no_speed_setting = 1;
3681                        }
3682                } else {
3683                        octeon_dev->no_speed_setting = 1;
3684                        octeon_dev->speed_setting = 10;
3685                }
3686                octeon_dev->speed_boot = octeon_dev->speed_setting;
3687
3688        }
3689
3690        devlink = devlink_alloc(&liquidio_devlink_ops,
3691                                sizeof(struct lio_devlink_priv));
3692        if (!devlink) {
3693                dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3694                goto setup_nic_wait_intr;
3695        }
3696
3697        lio_devlink = devlink_priv(devlink);
3698        lio_devlink->oct = octeon_dev;
3699
3700        if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3701                devlink_free(devlink);
3702                dev_err(&octeon_dev->pci_dev->dev,
3703                        "devlink registration failed\n");
3704                goto setup_nic_wait_intr;
3705        }
3706
3707        octeon_dev->devlink = devlink;
3708        octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3709
3710        return 0;
3711
3712setup_nic_dev_fail:
3713
3714        octeon_free_soft_command(octeon_dev, sc);
3715
3716setup_nic_wait_intr:
3717
3718        while (i--) {
3719                dev_err(&octeon_dev->pci_dev->dev,
3720                        "NIC ifidx:%d Setup failed\n", i);
3721                liquidio_destroy_nic_device(octeon_dev, i);
3722        }
3723        return -ENODEV;
3724}
3725
3726#ifdef CONFIG_PCI_IOV
3727static int octeon_enable_sriov(struct octeon_device *oct)
3728{
3729        unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3730        struct pci_dev *vfdev;
3731        int err;
3732        u32 u;
3733
3734        if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3735                err = pci_enable_sriov(oct->pci_dev,
3736                                       oct->sriov_info.num_vfs_alloced);
3737                if (err) {
3738                        dev_err(&oct->pci_dev->dev,
3739                                "OCTEON: Failed to enable PCI sriov: %d\n",
3740                                err);
3741                        oct->sriov_info.num_vfs_alloced = 0;
3742                        return err;
3743                }
3744                oct->sriov_info.sriov_enabled = 1;
3745
3746                /* init lookup table that maps DPI ring number to VF pci_dev
3747                 * struct pointer
3748                 */
3749                u = 0;
3750                vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3751                                       OCTEON_CN23XX_VF_VID, NULL);
3752                while (vfdev) {
3753                        if (vfdev->is_virtfn &&
3754                            (vfdev->physfn == oct->pci_dev)) {
3755                                oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3756                                        vfdev;
3757                                u += oct->sriov_info.rings_per_vf;
3758                        }
3759                        vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3760                                               OCTEON_CN23XX_VF_VID, vfdev);
3761                }
3762        }
3763
3764        return num_vfs_alloced;
3765}
3766
3767static int lio_pci_sriov_disable(struct octeon_device *oct)
3768{
3769        int u;
3770
3771        if (pci_vfs_assigned(oct->pci_dev)) {
3772                dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3773                return -EPERM;
3774        }
3775
3776        pci_disable_sriov(oct->pci_dev);
3777
3778        u = 0;
3779        while (u < MAX_POSSIBLE_VFS) {
3780                oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3781                u += oct->sriov_info.rings_per_vf;
3782        }
3783
3784        oct->sriov_info.num_vfs_alloced = 0;
3785        dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3786                 oct->pf_num);
3787
3788        return 0;
3789}
3790
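    /* PCI sriov_configure hook, invoked when userspace writes a VF count,
     * e.g. (BDF is hypothetical):
     *
     *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
     *
     * A count of zero disables SR-IOV and removes the representors.
     */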
3791static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3792{
3793        struct octeon_device *oct = pci_get_drvdata(dev);
3794        int ret = 0;
3795
3796        if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3797            (oct->sriov_info.sriov_enabled)) {
3798                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3799                         oct->pf_num, num_vfs);
3800                return 0;
3801        }
3802
3803        if (!num_vfs) {
3804                lio_vf_rep_destroy(oct);
3805                ret = lio_pci_sriov_disable(oct);
3806        } else if (num_vfs > oct->sriov_info.max_vfs) {
3807                dev_err(&oct->pci_dev->dev,
3808                        "OCTEON: Max allowed VFs:%d user requested:%d\n",
3809                        oct->sriov_info.max_vfs, num_vfs);
3810                ret = -EPERM;
3811        } else {
3812                oct->sriov_info.num_vfs_alloced = num_vfs;
3813                ret = octeon_enable_sriov(oct);
                    if (ret < 0)
                            return ret;
3814                dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3815                         oct->pf_num, num_vfs);
3816                ret = lio_vf_rep_create(oct);
3817                if (ret)
3818                        dev_info(&oct->pci_dev->dev,
3819                                 "vf representor create failed\n");
3820        }
3821
3822        return ret;
3823}
3824#endif
3825
3826/**
3827 * \brief initialize the NIC
3828 * @param oct octeon device
3829 *
3830 * This initialization routine is called once the Octeon device application is
3831 * up and running
3832 */
3833static int liquidio_init_nic_module(struct octeon_device *oct)
3834{
3835        int i, retval = 0;
3836        int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3837
3838        dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3839
3840        /* only default iq and oq were initialized
3841         * initialize the rest as well
3842         */
3843        /* run port_config command for each port */
3844        oct->ifcount = num_nic_ports;
3845
3846        memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3847
3848        for (i = 0; i < MAX_OCTEON_LINKS; i++)
3849                oct->props[i].gmxport = -1;
3850
3851        retval = setup_nic_devices(oct);
3852        if (retval) {
3853                dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3854                goto octnet_init_failure;
3855        }
3856
3857        /* Call vf_rep_modinit if the firmware is switchdev capable
3858         * and do it from the first liquidio function probed.
3859         */
3860        if (!oct->octeon_id &&
3861            oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3862                retval = lio_vf_rep_modinit();
3863                if (retval) {
3864                        liquidio_stop_nic_module(oct);
3865                        goto octnet_init_failure;
3866                }
3867        }
3868
3869        liquidio_ptp_init(oct);
3870
3871        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3872
3873        return retval;
3874
3875octnet_init_failure:
3876
3877        oct->ifcount = 0;
3878
3879        return retval;
3880}
3881
3882/**
3883 * \brief starter callback that invokes the remaining initialization work after
3884 * the NIC is up and running.
3885 * @param work  work_struct embedded in a struct cavium_wk whose ctxptr is the octeon device
3886 */
3887static void nic_starter(struct work_struct *work)
3888{
3889        struct octeon_device *oct;
3890        struct cavium_wk *wk = (struct cavium_wk *)work;
3891
3892        oct = (struct octeon_device *)wk->ctxptr;
3893
3894        if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3895                return;
3896
3897        /* If the status of the device is CORE_OK, the core
3898         * application has reported its application type. Call
3899         * any registered handlers now and move to the RUNNING
3900         * state.
3901         */
3902        if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3903                schedule_delayed_work(&oct->nic_poll_work.work,
3904                                      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3905                return;
3906        }
3907
3908        atomic_set(&oct->status, OCT_DEV_RUNNING);
3909
3910        if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3911                dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3912
3913                if (liquidio_init_nic_module(oct))
3914                        dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3915                else
3916                        handshake[oct->octeon_id].started_ok = 1;
3917        } else {
3918                dev_err(&oct->pci_dev->dev,
3919                        "Unexpected application running on NIC (%d). Check firmware.\n",
3920                        oct->app_mode);
3921        }
3922
3923        complete(&handshake[oct->octeon_id].started);
3924}
3925
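    /* Dispatch handler for OPCODE_NIC_VF_DRV_NOTICE: tracks VF driver
     * load/remove (pinning this module while VF drivers are bound) and
     * VF-initiated MAC address changes.
     */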
3926static int
3927octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3928{
3929        struct octeon_device *oct = (struct octeon_device *)buf;
3930        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3931        int i, notice, vf_idx;
3932        bool cores_crashed;
3933        u64 *data, vf_num;
3934
3935        notice = recv_pkt->rh.r.ossp;
3936        data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3937
3938        /* the first 64-bit word of data is the vf_num */
3939        vf_num = data[0];
3940        octeon_swap_8B_data(&vf_num, 1);
3941        vf_idx = (int)vf_num - 1;
3942
3943        cores_crashed = READ_ONCE(oct->cores_crashed);
3944
3945        if (notice == VF_DRV_LOADED) {
3946                if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3947                        oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3948                        dev_info(&oct->pci_dev->dev,
3949                                 "driver for VF%d was loaded\n", vf_idx);
3950                        if (!cores_crashed)
3951                                try_module_get(THIS_MODULE);
3952                }
3953        } else if (notice == VF_DRV_REMOVED) {
3954                if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3955                        oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3956                        dev_info(&oct->pci_dev->dev,
3957                                 "driver for VF%d was removed\n", vf_idx);
3958                        if (!cores_crashed)
3959                                module_put(THIS_MODULE);
3960                }
3961        } else if (notice == VF_DRV_MACADDR_CHANGED) {
3962                u8 *b = (u8 *)&data[1];
3963
3964                oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3965                dev_info(&oct->pci_dev->dev,
3966                         "VF driver changed VF%d's MAC address to %pM\n",
3967                         vf_idx, b + 2);
3968        }
3969
3970        for (i = 0; i < recv_pkt->buffer_count; i++)
3971                recv_buffer_free(recv_pkt->buffer_ptr[i]);
3972        octeon_free_recv_info(recv_info);
3973
3974        return 0;
3975}
3976
3977/**
3978 * \brief Device initialization for each Octeon device that is probed
3979 * @param octeon_dev  octeon device
3980 */
3981static int octeon_device_init(struct octeon_device *octeon_dev)
3982{
3983        int j, ret;
3984        char bootcmd[] = "\n";
3985        char *dbg_enb = NULL;
3986        enum lio_fw_state fw_state;
3987        struct octeon_device_priv *oct_priv =
3988                (struct octeon_device_priv *)octeon_dev->priv;
3989        atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3990
3991        /* Enable access to the octeon device and make its DMA capability
3992         * known to the OS.
3993         */
3994        if (octeon_pci_os_setup(octeon_dev))
3995                return 1;
3996
3997        atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
3998
3999        /* Identify the Octeon type and map the BAR address space. */
4000        if (octeon_chip_specific_setup(octeon_dev)) {
4001                dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4002                return 1;
4003        }
4004
4005        atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4006
4007        /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4008         * since that is what is required for the reference to be removed
4009         * during de-initialization (see 'octeon_destroy_resources').
4010         */
4011        octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4012                               PCI_SLOT(octeon_dev->pci_dev->devfn),
4013                               PCI_FUNC(octeon_dev->pci_dev->devfn),
4014                               true);
4015
4016        octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4017
4018        /* CN23XX supports preloaded firmware if the following is true:
4019         *
4020         * The adapter indicates that firmware is currently running AND
4021         * 'fw_type' is 'auto'.
4022         *
4023         * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4024         */
4025        if (OCTEON_CN23XX_PF(octeon_dev) &&
4026            cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4027                atomic_cmpxchg(octeon_dev->adapter_fw_state,
4028                               FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4029        }
4030
4031        /* If loading firmware, only first device of adapter needs to do so. */
4032        fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4033                                  FW_NEEDS_TO_BE_LOADED,
4034                                  FW_IS_BEING_LOADED);
4035
4036        /* Here, [local variable] 'fw_state' is set to one of:
4037         *
4038         *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4039         *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4040         *                          firmware to the adapter.
4041         *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4042         *                          firmware to the adapter.
4043         */
4044
4045        /* Prior to f/w load, perform a soft reset of the Octeon device;
4046         * if error resetting, return w/error.
4047         */
4048        if (fw_state == FW_NEEDS_TO_BE_LOADED)
4049                if (octeon_dev->fn_list.soft_reset(octeon_dev))
4050                        return 1;
4051
4052        /* Initialize the dispatch mechanism used to push packets arriving on
4053         * Octeon Output queues.
4054         */
4055        if (octeon_init_dispatch_list(octeon_dev))
4056                return 1;
4057
4058        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4059                                    OPCODE_NIC_CORE_DRV_ACTIVE,
4060                                    octeon_core_drv_init,
4061                                    octeon_dev);
4062
4063        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4064                                    OPCODE_NIC_VF_DRV_NOTICE,
4065                                    octeon_recv_vf_drv_notice, octeon_dev);
4066        INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4067        octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4068        schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4069                              LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4070
4071        atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4072
4073        if (octeon_set_io_queues_off(octeon_dev)) {
4074                dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4075                return 1;
4076        }
4077
4078        if (OCTEON_CN23XX_PF(octeon_dev)) {
4079                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4080                if (ret) {
4081                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4082                        return ret;
4083                }
4084        }
4085
4086        /* Initialize soft command buffer pool */
4088        if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4089                dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4090                return 1;
4091        }
4092        atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4093
4094        /*  Setup the data structures that manage this Octeon's Input queues. */
4095        if (octeon_setup_instr_queues(octeon_dev)) {
4096                dev_err(&octeon_dev->pci_dev->dev,
4097                        "instruction queue initialization failed\n");
4098                return 1;
4099        }
4100        atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4101
4102        /* Initialize lists to manage the requests of different types that
4103         * arrive from user & kernel applications for this octeon device.
4104         */
4105        if (octeon_setup_response_list(octeon_dev)) {
4106                dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4107                return 1;
4108        }
4109        atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4110
4111        if (octeon_setup_output_queues(octeon_dev)) {
4112                dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4113                return 1;
4114        }
4115
4116        atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4117
4118        if (OCTEON_CN23XX_PF(octeon_dev)) {
4119                if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4120                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4121                        return 1;
4122                }
4123                atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4124
4125                if (octeon_allocate_ioq_vector
4126                                (octeon_dev,
4127                                 octeon_dev->sriov_info.num_pf_rings)) {
4128                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4129                        return 1;
4130                }
4131                atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4132
4133        } else {
4134                /* The input and output queue registers were setup earlier (the
4135                 * queues were not enabled). Any additional registers
4136                 * that need to be programmed should be done now.
4137                 */
4138                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4139                if (ret) {
4140                        dev_err(&octeon_dev->pci_dev->dev,
4141                                "Failed to configure device registers\n");
4142                        return ret;
4143                }
4144        }
4145
4146        /* Initialize the tasklet that handles output queue packet processing. */
4147        dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4148        tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4149                     (unsigned long)octeon_dev);
4150
4151        /* Setup the interrupt handler and record the INT SUM register address
4152         */
4153        if (octeon_setup_interrupt(octeon_dev,
4154                                   octeon_dev->sriov_info.num_pf_rings))
4155                return 1;
4156
4157        /* Enable Octeon device interrupts */
4158        octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4159
4160        atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4161
4162        /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4163         * the output queue is enabled.
4164         * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4165         * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4166         * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4167         * before any credits have been issued, causing the ring to be reset
4168         * (and the f/w appear to never have started).
4169         */
4170        for (j = 0; j < octeon_dev->num_oqs; j++)
4171                writel(octeon_dev->droq[j]->max_count,
4172                       octeon_dev->droq[j]->pkts_credit_reg);
4173
4174        /* Enable the input and output queues for this Octeon device */
4175        ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4176        if (ret) {
4177                dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4178                return ret;
4179        }
4180
4181        atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4182
4183        if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4184                dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4185                if (!ddr_timeout) {
4186                        dev_info(&octeon_dev->pci_dev->dev,
4187                                 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4188                }
4189
4190                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4191
4192                /* Wait for the octeon to initialize DDR after the soft-reset.*/
4193                while (!ddr_timeout) {
4194                        set_current_state(TASK_INTERRUPTIBLE);
4195                        if (schedule_timeout(HZ / 10)) {
4196                                /* user probably pressed Control-C */
4197                                return 1;
4198                        }
4199                }
4200                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4201                if (ret) {
4202                        dev_err(&octeon_dev->pci_dev->dev,
4203                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4204                                ret);
4205                        return 1;
4206                }
4207
4208                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4209                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4210                        return 1;
4211                }
4212
4213                /* Divert uboot to take commands from host instead. */
4214                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4215
4216                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4217                ret = octeon_init_consoles(octeon_dev);
4218                if (ret) {
4219                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4220                        return 1;
4221                }
4222                /* If console debug enabled, specify empty string to use default
4223                 * enablement ELSE specify NULL string for 'disabled'.
4224                 */
4225                dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4226                ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4227                if (ret) {
4228                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4229                        return 1;
4230                } else if (octeon_console_debug_enabled(0)) {
4231                        /* If console was added AND we're logging console output
4232                         * then set our console print function.
4233                         */
4234                        octeon_dev->console[0].print = octeon_dbg_console_print;
4235                }
4236
4237                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4238
4239                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4240                ret = load_firmware(octeon_dev);
4241                if (ret) {
4242                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4243                        return 1;
4244                }
4245
4246                atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4247        }
4248
4249        handshake[octeon_dev->octeon_id].init_ok = 1;
4250        complete(&handshake[octeon_dev->octeon_id].init);
4251
4252        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4253
4254        return 0;
4255}
4256
4257/**
4258 * \brief Debug console print function
4259 * @param octeon_dev  octeon device
4260 * @param console_num console number
4261 * @param prefix      first portion of line to display
4262 * @param suffix      second portion of line to display
4263 *
4264 * The OCTEON debug console outputs entire lines (excluding '\n').
4265 * Normally, the line will be passed in the 'prefix' parameter.
4266 * However, due to buffering, it is possible for a line to be split into two
4267 * parts, in which case they will be passed as the 'prefix' parameter and
4268 * 'suffix' parameter.
4269 */
4270static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4271                                    char *prefix, char *suffix)
4272{
4273        if (prefix && suffix)
4274                dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4275                         suffix);
4276        else if (prefix)
4277                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4278        else if (suffix)
4279                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4280
4281        return 0;
4282}
4283
4284/**
4285 * \brief Exits the module
4286 */
4287static void __exit liquidio_exit(void)
4288{
4289        liquidio_deinit_pci();
4290
4291        pr_info("LiquidIO network module is now unloaded\n");
4292}
4293
4294module_init(liquidio_init);
4295module_exit(liquidio_exit);
4296