linux/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

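/* A negative "debug" value is outside the 32 NETIF_MSG_* bits, so
 * netif_msg_init() is expected to fall back to this default when each
 * interface's msg_enable mask is initialized.
 */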
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

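/* Context shared between send_rx_ctrl_cmd() and its completion callback:
 * the callback sets "cond" and wakes "wc" so the sender can sleep until
 * the firmware acknowledges the RX control command or the wait times out.
 */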
struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

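/* Layout of the firmware's response to a timestamped TX request; read by
 * handle_timestamp() through sc->virtrptr.
 */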
struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

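/* Per-packet TX metadata carried in word ossp[0] of the command;
 * liquidio_xmit() stores the GSO size and segment count here.
 */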
union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
                (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);
static int liquidio_stop(struct net_device *netdev);

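/**
 * \brief Wait for pending output-queue packets to be processed
 * @param oct Pointer to Octeon device
 *
 * Polls every active DROQ and reschedules the DROQ tasklet until no
 * packets remain or the retry budget (MAX_IO_PENDING_PKT_COUNT) expires.
 */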
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;
        int retry = MAX_IO_PENDING_PKT_COUNT;
        int pkt_cnt = 0, pending_pkts;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                            atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        u32 status, mask;
        int pos = 0x100;
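        /* Assumes the AER extended capability sits at the conventional
         * 0x100 offset in config space; pci_find_ext_capability() would
         * be the generic way to locate it.
         */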

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask; /* Clear corresponding nonfatal bits */
        else
                status &= mask; /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        struct msix_entry *msix_entries;
        int i;

        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        for (i = 0; i < oct->ifcount; i++)
                netif_device_detach(oct->props[i].netdev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);
        if (oct->msix_on) {
                msix_entries = (struct msix_entry *)oct->msix_entries;
                for (i = 0; i < oct->num_msix_irqs; i++) {
                        /* clear the affinity_cpumask */
                        irq_set_affinity_hint(msix_entries[i].vector,
                                              NULL);
                        free_irq(msix_entries[i].vector,
                                 &oct->ioq_vector[i]);
                }
                pci_disable_msix(oct->pci_dev);
                kfree(oct->msix_entries);
                oct->msix_entries = NULL;
                octeon_free_ioq_vector(oct);
        }
        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);

        pci_disable_device(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        return PCI_ERS_RESULT_DISCONNECT;
}

/* For PCI-E Advanced Error Recovery (AER). Only .error_detected is
 * implemented: non-fatal errors are cleaned up and recovery is attempted,
 * while fatal errors quiesce the device and disconnect it.
 */
static const struct pci_error_handlers liquidio_vf_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
};

static const struct pci_device_id liquidio_vf_pci_tbl[] = {
        {
                PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);

static struct pci_driver liquidio_vf_pci_driver = {
        .name           = "LiquidIO_VF",
        .id_table       = liquidio_vf_pci_tbl,
        .probe          = liquidio_vf_probe,
        .remove         = liquidio_vf_remove,
        .err_handler    = &liquidio_vf_err_handler,    /* For AER */
};

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains the max MTU of the lio
         * interface. This work function is invoked only when the new max
         * MTU is less than the current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static void update_link_status(struct net_device *netdev,
                               union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
                lio->linfo.link.u64 = ls->u64;

                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }

                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        dev_info(&oct->pci_dev->dev,
                                 "Max MTU Changed from %d to %d\n",
                                 current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->max_mtu = lio->linfo.link.s.mtu;
                }

                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
                  const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));

        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }
        oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* mark hardware as deprecated in RHEL8 */
        mark_hardware_deprecated(DRV_NAME);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        if (octeon_device_init(oct_dev)) {
                liquidio_vf_remove(pdev);
                return -ENOMEM;
        }

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        pcie_flr(oct->pci_dev);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        struct msix_entry *msix_entries;
        int i;

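        /* Teardown ladder: each initialization state intentionally falls
         * through so that every stage reached before removal (or an init
         * failure) is undone.
         */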
        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:
                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:
                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
                /* fall through */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                }
                /* Soft reset the octeon device before exiting */
                if (oct->pci_dev->reset_fn)
                        octeon_pci_flr(oct);
                else
                        cn23xx_vf_ask_pf_to_do_flr(oct);

                /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                octeon_free_ioq_vector(oct);

                /* fallthrough */
        case OCT_DEV_MBOX_SETUP_DONE:
                oct->fn_list.free_mbox(oct);

                /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }

                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }
}

/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
                            u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_rx_ctl_context *ctx;

        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (status)
                dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
        WRITE_ONCE(ctx->cond, 1);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int ctx_size = sizeof(struct liquidio_rx_ctl_context);
        struct liquidio_rx_ctl_context *ctx;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, ctx_size);
        if (!sc)
                return;

        ncmd = (union octnet_cmd *)sc->virtdptr;
        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        WRITE_ONCE(ctx->cond, 0);
        ctx->octeon_id = lio_get_device_id(oct);
        init_waitqueue_head(&ctx->wc);

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        sc->callback = rx_ctl_callback;
        sc->callback_arg = sc;
        sc->wait_time = 5000;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
                        return;
                oct->props[lio->ifidx].rx_on = start_stop;
        }

        octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct napi_struct *napi, *n;
        struct lio *lio;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        cleanup_link_status_change_wq(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        struct lio *lio;
        int i, j;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_vf_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->app_mode == CVM_DRV_NIC_APP)
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
#ifdef CONFIG_PCI_IOV
        /* Setup PCI first. With no physfn reference the PF is not owned
         * by this kernel (e.g. the VF has been assigned to a guest), so
         * the VF issues the FLR itself.
         */
        if (!oct->pci_dev->physfn)
                octeon_pci_flr(oct);
#endif

        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct octnic_gather *g;
        struct sk_buff *skb;
        int i, frags, iq;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

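        /* Each octeon sg entry holds four pointers, hence the (i >> 2) and
         * (i & 3) indexing; slot 0 of entry 0 held the linear part of the
         * skb, so fragment unmapping starts at i = 1.
         */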
        i = 1;
        while (frags--) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               frag->size, DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct octeon_soft_command *sc;
        struct octnic_gather *g;
        struct sk_buff *skb;
        int i, frags, iq;
        struct lio *lio;

        sc = (struct octeon_soft_command *)buf;
        skb = (struct sk_buff *)sc->callback_arg;
        finfo = (struct octnet_buf_free_info *)&skb->cb;

        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               frag->size, DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        /* Don't free the skb yet; the timestamp completion callback
         * (handle_timestamp()) frees it after consuming the response.
         */
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct napi_struct *napi, *n;

        if (!oct->props[lio->ifidx].napi_enabled) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_enable(napi);

                oct->props[lio->ifidx].napi_enabled = 1;

                oct->droq[0]->ops.poll_mode = 1;
        }

        ifstate_set(lio, LIO_IFSTATE_RUNNING);

        /* Ready for link status updates */
        lio->intf_open = 1;

        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
        start_txqs(netdev);

        /* tell Octeon to start forwarding packets to host */
        send_rx_ctrl_cmd(lio, 1);

        dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);

        return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct napi_struct *napi, *n;

        /* tell Octeon to stop forwarding packets to host */
        send_rx_ctrl_cmd(lio, 0);

        netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
        /* Inform that netif carrier is down */
        lio->intf_open = 0;
        lio->linfo.link.s.link_up = 0;

        netif_carrier_off(netdev);
        lio->link_changes++;

        ifstate_reset(lio, LIO_IFSTATE_RUNNING);

        stop_txqs(netdev);

        /* Wait for any pending Rx descriptors */
        if (lio_wait_for_clean_oq(oct))
                netif_info(lio, rx_err, lio->netdev,
                           "Proceeding with stop interface after partial RX desc processing\n");

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                oct->droq[0]->ops.poll_mode = 0;
        }

        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

        return 0;
}

/**
 * \brief Converts net device flags to a filter mask
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

        if (netdev->flags & IFF_PROMISC)
                f |= OCTNET_IFFLAG_PROMISC;

        if (netdev->flags & IFF_ALLMULTI)
                f |= OCTNET_IFFLAG_ALLMULTI;

        if (netdev->flags & IFF_MULTICAST) {
                f |= OCTNET_IFFLAG_MULTICAST;

                /* Accept all multicast addresses if there are more than we
                 * can handle
                 */
                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
                        f |= OCTNET_IFFLAG_ALLMULTI;
        }

        if (netdev->flags & IFF_BROADCAST)
                f |= OCTNET_IFFLAG_BROADCAST;

        return f;
}

static void liquidio_set_uc_list(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        struct netdev_hw_addr *ha;
        u64 *mac;

        if (lio->netdev_uc_count == netdev_uc_count(netdev))
                return;

        if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
                dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
                return;
        }

        lio->netdev_uc_count = netdev_uc_count(netdev);

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
        nctrl.ncmd.s.more = lio->netdev_uc_count;
        nctrl.ncmd.s.param1 = oct->vf_num;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        /* copy all the addresses into the udd */
        mac = &nctrl.udd[0];
        netdev_for_each_uc_addr(ha, netdev) {
                ether_addr_copy(((u8 *)mac) + 2, ha->addr);
                mac++;
        }

        octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}

/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        struct netdev_hw_addr *ha;
        u64 *mc;
        int ret;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        /* Create a ctrl pkt command to be sent to core app. */
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
        nctrl.ncmd.s.param1 = get_new_flags(netdev);
        nctrl.ncmd.s.param2 = mc_count;
        nctrl.ncmd.s.more = mc_count;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        /* copy all the addresses into the udd */
        mc = &nctrl.udd[0];
        netdev_for_each_mc_addr(ha, netdev) {
                *mc = 0;
                ether_addr_copy(((u8 *)mc) + 2, ha->addr);
                /* no need to swap bytes */
                if (++mc > &nctrl.udd[mc_count])
                        break;
        }

        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;

        /* Apparently, any activity in this call from the kernel has to
         * be atomic. So we won't wait for response.
         */
        nctrl.wait_time = 0;

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
                        ret);
        }

        liquidio_set_uc_list(netdev);
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = (struct sockaddr *)p;
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        if (lio->linfo.macaddr_is_admin_asgnd)
                return -EPERM;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
        nctrl.ncmd.s.param1 = 0;
        nctrl.ncmd.s.more = 1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
        nctrl.wait_time = 100;

        nctrl.udd[0] = 0;
        /* The MAC Address is presented in network byte order. */
        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
                return -ENOMEM;
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);

        return 0;
}

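/**
 * \brief Aggregate per-queue counters into rtnl_link_stats64
 * @param netdev network device
 * @param lstats stats structure to fill
 */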
static void
liquidio_get_stats64(struct net_device *netdev,
                     struct rtnl_link_stats64 *lstats)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct;
        u64 pkts = 0, drop = 0, bytes = 0;
        struct oct_droq_stats *oq_stats;
        struct oct_iq_stats *iq_stats;
        int i, iq_no, oq_no;

        oct = lio->oct_dev;

        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
                return;

        for (i = 0; i < oct->num_iqs; i++) {
                iq_no = lio->linfo.txpciq[i].s.q_no;
                iq_stats = &oct->instr_queue[iq_no]->stats;
                pkts += iq_stats->tx_done;
                drop += iq_stats->tx_dropped;
                bytes += iq_stats->tx_tot_bytes;
        }

        lstats->tx_packets = pkts;
        lstats->tx_bytes = bytes;
        lstats->tx_dropped = drop;

        pkts = 0;
        drop = 0;
        bytes = 0;

        for (i = 0; i < oct->num_oqs; i++) {
                oq_no = lio->linfo.rxpciq[i].s.q_no;
                oq_stats = &oct->droq[oq_no]->stats;
                pkts += oq_stats->rx_pkts_received;
                drop += (oq_stats->rx_dropped +
                         oq_stats->dropped_nodispatch +
                         oq_stats->dropped_toomany +
                         oq_stats->dropped_nomem);
                bytes += oq_stats->rx_bytes_received;
        }

        lstats->rx_bytes = bytes;
        lstats->rx_packets = pkts;
        lstats->rx_dropped = drop;

        octnet_get_link_stats(netdev);
        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;

        /* detailed rx_errors: */
        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
        /* recved pkt with crc error */
        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
        /* recv'd frame alignment error */
        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;

        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
                            lstats->rx_frame_errors;

        /* detailed tx_errors */
        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;

        lstats->tx_errors = lstats->tx_aborted_errors +
                lstats->tx_carrier_errors;
}

/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
        struct lio *lio = GET_LIO(netdev);
        struct hwtstamp_config conf;

        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
                return -EFAULT;

        if (conf.flags)
                return -EINVAL;

        switch (conf.tx_type) {
        case HWTSTAMP_TX_ON:
        case HWTSTAMP_TX_OFF:
                break;
        default:
                return -ERANGE;
        }

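        /* Every supported PTP filter is coerced to HWTSTAMP_FILTER_ALL
         * below, i.e. the device timestamps all received packets or none.
         */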
        switch (conf.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
                conf.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                return -ERANGE;
        }

        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

        else
                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}

/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return hwtstamp_ioctl(netdev, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octnet_buf_free_info *finfo;
        struct oct_timestamp_resp *resp;
        struct octeon_soft_command *sc;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)skb->cb;
        lio = finfo->lio;
        sc = finfo->sc;
        oct = lio->oct_dev;
        resp = (struct oct_timestamp_resp *)sc->virtrptr;

        if (status != OCTEON_REQUEST_DONE) {
                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
                resp->timestamp = 0;
        }

        octeon_swap_8B_data(&resp->timestamp, 1);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                struct skb_shared_hwtstamps ts;
                u64 ns = resp->timestamp;

                netif_info(lio, tx_done, lio->netdev,
                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
                           skb, (unsigned long long)ns);
                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
                skb_tstamp_tx(skb, &ts);
        }

        octeon_free_soft_command(oct, sc);
        tx_buffer_free(skb);
}

/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 * @param xmit_more more packets are queued behind this one
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
                                  struct octnic_data_pkt *ndata,
                                  struct octnet_buf_free_info *finfo,
                                  int xmit_more)
{
        struct octeon_soft_command *sc;
        int ring_doorbell;
        struct lio *lio;
        int retval;
        u32 len;

        lio = finfo->lio;

        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
                                            sizeof(struct oct_timestamp_resp));
        finfo->sc = sc;

        if (!sc) {
                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
                return IQ_SEND_FAILED;
        }

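        /* A timestamp response is expected, so upgrade the no-response
         * request type to its response-carrying equivalent; the completion
         * path will then invoke handle_timestamp().
         */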
        if (ndata->reqtype == REQTYPE_NORESP_NET)
                ndata->reqtype = REQTYPE_RESP_NET;
        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
                ndata->reqtype = REQTYPE_RESP_NET_SG;

        sc->callback = handle_timestamp;
        sc->callback_arg = finfo->skb;
        sc->iq_no = ndata->q_no;

        len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

        ring_doorbell = !xmit_more;

        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
                                     sc, len, ndata->reqtype);

        if (retval == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
                        retval);
                octeon_free_soft_command(oct, sc);
        } else {
                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
        }

        return retval;
}

/** \brief Transmit network packets to the Octeon interface
 * @param skb      skbuff struct to be passed to network layer.
 * @param netdev   pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct octnet_buf_free_info *finfo;
        union octnic_cmd_setup cmdsetup;
        struct octnic_data_pkt ndata;
        struct octeon_instr_irh *irh;
        struct oct_iq_stats *stats;
        struct octeon_device *oct;
        int q_idx = 0, iq_no = 0;
        union tx_info *tx_info;
        int xmit_more = 0;
        struct lio *lio;
        int status = 0;
        u64 dptr = 0;
        u32 tag = 0;
        int j;

        lio = GET_LIO(netdev);
        oct = lio->oct_dev;

        q_idx = skb_iq(lio->oct_dev, skb);
        tag = q_idx;
        iq_no = lio->linfo.txpciq[q_idx].s.q_no;

        stats = &oct->instr_queue[iq_no]->stats;

        /* Check for all conditions in which the current packet cannot be
         * transmitted.
         */
        if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
            (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
                netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
                           lio->linfo.link.s.link_up);
                goto lio_xmit_failed;
        }

        /* Use space in skb->cb to store info used to unmap and
         * free the buffers.
         */
        finfo = (struct octnet_buf_free_info *)skb->cb;
        finfo->lio = lio;
        finfo->skb = skb;
        finfo->sc = NULL;

        /* Prepare the attributes for the data to be passed to OSI. */
        memset(&ndata, 0, sizeof(struct octnic_data_pkt));

        ndata.buf = finfo;

        ndata.q_no = iq_no;

        if (octnet_iq_is_full(oct, ndata.q_no)) {
                /* defer sending if queue is full */
                netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
                           ndata.q_no);
                stats->tx_iq_busy++;
                return NETDEV_TX_BUSY;
        }

        ndata.datasize = skb->len;

        cmdsetup.u64 = 0;
        cmdsetup.s.iq_no = iq_no;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        cmdsetup.s.tnl_csum = 1;
                        stats->tx_vxlan++;
                } else {
                        cmdsetup.s.transport_csum = 1;
                }
        }
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                cmdsetup.s.timestamp = 1;
        }

        if (!skb_shinfo(skb)->nr_frags) {
                cmdsetup.s.u.datasize = skb->len;
                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
                /* Offload checksum calculation for TCP/UDP packets */
                dptr = dma_map_single(&oct->pci_dev->dev,
                                      skb->data,
                                      skb->len,
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
                                __func__);
                        return NETDEV_TX_BUSY;
                }

                ndata.cmd.cmd3.dptr = dptr;
                finfo->dptr = dptr;
                ndata.reqtype = REQTYPE_NORESP_NET;

        } else {
                struct skb_frag_struct *frag;
                struct octnic_gather *g;
                int i, frags;

                spin_lock(&lio->glist_lock[q_idx]);
                g = (struct octnic_gather *)
                        lio_list_delete_head(&lio->glist[q_idx]);
                spin_unlock(&lio->glist_lock[q_idx]);

                if (!g) {
                        netif_info(lio, tx_err, lio->netdev,
                                   "Transmit scatter gather: glist null!\n");
                        goto lio_xmit_failed;
                }

                cmdsetup.s.gather = 1;
                cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
                octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

                memset(g->sg, 0, g->sg_size);

                g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
                                                 skb->data,
                                                 (skb->len - skb->data_len),
                                                 DMA_TO_DEVICE);
                if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
                        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
                                __func__);
                        return NETDEV_TX_BUSY;
                }
                add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

                frags = skb_shinfo(skb)->nr_frags;
                i = 1;
                while (frags--) {
                        frag = &skb_shinfo(skb)->frags[i - 1];

                        g->sg[(i >> 2)].ptr[(i & 3)] =
                                dma_map_page(&oct->pci_dev->dev,
                                             frag->page.p,
                                             frag->page_offset,
                                             frag->size,
                                             DMA_TO_DEVICE);
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg[i >> 2].ptr[i & 3])) {
                                dma_unmap_single(&oct->pci_dev->dev,
                                                 g->sg[0].ptr[0],
                                                 skb->len - skb->data_len,
                                                 DMA_TO_DEVICE);
                                for (j = 1; j < i; j++) {
                                        frag = &skb_shinfo(skb)->frags[j - 1];
                                        dma_unmap_page(&oct->pci_dev->dev,
                                                       g->sg[j >> 2].ptr[j & 3],
                                                       frag->size,
                                                       DMA_TO_DEVICE);
                                }
                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
                                        __func__);
                                return NETDEV_TX_BUSY;
                        }

                        add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
                        i++;
                }

                dptr = g->sg_dma_ptr;

                ndata.cmd.cmd3.dptr = dptr;
                finfo->dptr = dptr;
                finfo->g = g;

                ndata.reqtype = REQTYPE_NORESP_NET_SG;
        }

        irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
        tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];

        if (skb_shinfo(skb)->gso_size) {
                tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
                tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
        }

        /* HW insert VLAN tag */
        if (skb_vlan_tag_present(skb)) {
                irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
                irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
        }

1580        xmit_more = netdev_xmit_more();
1581
1582        if (unlikely(cmdsetup.s.timestamp))
1583                status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1584        else
1585                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1586        if (status == IQ_SEND_FAILED)
1587                goto lio_xmit_failed;
1588
1589        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1590
1591        if (status == IQ_SEND_STOP) {
1592                dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1593                        iq_no);
1594                netif_stop_subqueue(netdev, q_idx);
1595        }
1596
1597        netif_trans_update(netdev);
1598
1599        if (tx_info->s.gso_segs)
1600                stats->tx_done += tx_info->s.gso_segs;
1601        else
1602                stats->tx_done++;
1603        stats->tx_tot_bytes += ndata.datasize;
1604
1605        return NETDEV_TX_OK;
1606
1607lio_xmit_failed:
1608        stats->tx_dropped++;
1609        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1610                   iq_no, stats->tx_dropped);
1611        if (dptr)
1612                dma_unmap_single(&oct->pci_dev->dev, dptr,
1613                                 ndata.datasize, DMA_TO_DEVICE);
1614
1615        octeon_ring_doorbell_locked(oct, iq_no);
1616
1617        tx_buffer_free(skb);
1618        return NETDEV_TX_OK;
1619}
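
    /* Illustrative sketch, not part of the driver: liquidio_xmit() threads
     * netdev_xmit_more() through the send helpers so that the IQ doorbell
     * is rung only once per burst of packets.  A hypothetical helper making
     * that policy explicit (assuming only the octeon_ring_doorbell_locked()
     * call seen in the error path above):
     */
    static inline void lio_demo_flush_iq(struct octeon_device *oct, int iq_no,
                                         bool xmit_more)
    {
            /* Defer the doorbell while the stack promises more packets. */
            if (!xmit_more)
                    octeon_ring_doorbell_locked(oct, iq_no);
    }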
1620
1621/** \brief Network device Tx timeout
1622 * @param netdev    pointer to network device
     * @param txqueue   index of the stalled transmit queue
1623 */
1624static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1625{
1626        struct lio *lio;
1627
1628        lio = GET_LIO(netdev);
1629
1630        netif_info(lio, tx_err, lio->netdev,
1631                   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1632                   netdev->stats.tx_dropped);
1633        netif_trans_update(netdev);
1634        wake_txqs(netdev);
1635}
1636
1637static int
1638liquidio_vlan_rx_add_vid(struct net_device *netdev,
1639                         __be16 proto __attribute__((unused)), u16 vid)
1640{
1641        struct lio *lio = GET_LIO(netdev);
1642        struct octeon_device *oct = lio->oct_dev;
1643        struct octnic_ctrl_pkt nctrl;
1644        struct completion compl;
1645        u16 response_code;
1646        int ret = 0;
1647
1648        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1649
1650        nctrl.ncmd.u64 = 0;
1651        nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1652        nctrl.ncmd.s.param1 = vid;
1653        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1654        nctrl.wait_time = 100;
1655        nctrl.netpndev = (u64)netdev;
1656        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1657        init_completion(&compl);
1658        nctrl.completion = &compl;
1659        nctrl.response_code = &response_code;
1660
1661        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1662        if (ret < 0) {
1663                dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1664                        ret);
1665                return -EIO;
1666        }
1667
1668        if (!wait_for_completion_timeout(&compl,
1669                                         msecs_to_jiffies(nctrl.wait_time)))
1670                return -EPERM;
1671
1672        if (READ_ONCE(response_code))
1673                return -EPERM;
1674
1675        return 0;
1676}
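
    /* A minimal sketch of the completion handshake used above, with
     * hypothetical lio_demo_* names: the caller parks on a struct
     * completion that the ctrl-path callback completes once the firmware
     * answers, and reads the response code only after the wait succeeds.
     */
    struct lio_demo_req {
            struct completion done;
            u16 response_code;
    };

    static inline int lio_demo_wait(struct lio_demo_req *req, u32 timeout_ms)
    {
            if (!wait_for_completion_timeout(&req->done,
                                             msecs_to_jiffies(timeout_ms)))
                    return -ETIMEDOUT;

            return READ_ONCE(req->response_code) ? -EIO : 0;
    }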
1677
1678static int
1679liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1680                          __be16 proto __attribute__((unused)), u16 vid)
1681{
1682        struct lio *lio = GET_LIO(netdev);
1683        struct octeon_device *oct = lio->oct_dev;
1684        struct octnic_ctrl_pkt nctrl;
1685        int ret = 0;
1686
1687        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1688
1689        nctrl.ncmd.u64 = 0;
1690        nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1691        nctrl.ncmd.s.param1 = vid;
1692        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1693        nctrl.wait_time = 100;
1694        nctrl.netpndev = (u64)netdev;
1695        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1696
1697        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1698        if (ret < 0) {
1699                dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n",
1700                        ret);
1701        }
1702        return ret;
1703}
1704
1705/** Send command to firmware to enable/disable RX checksum offload
1706 * @param netdev                pointer to network device
1707 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
1708 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
1709 *                              OCTNET_CMD_RXCSUM_DISABLE
1710 * @returns                     SUCCESS or FAILURE
1711 */
1712static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1713                                       u8 rx_cmd)
1714{
1715        struct lio *lio = GET_LIO(netdev);
1716        struct octeon_device *oct = lio->oct_dev;
1717        struct octnic_ctrl_pkt nctrl;
1718        int ret = 0;
1719
1720        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1721
1722        nctrl.ncmd.u64 = 0;
1723        nctrl.ncmd.s.cmd = command;
1724        nctrl.ncmd.s.param1 = rx_cmd;
1725        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1726        nctrl.wait_time = 100;
1727        nctrl.netpndev = (u64)netdev;
1728        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1729
1730        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1731        if (ret < 0) {
1732                dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1733                        ret);
1734        }
1735        return ret;
1736}
1737
1738/** Send command to firmware to add/delete a VxLAN UDP port
1739 * @param netdev                pointer to network device
1740 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
1741 * @param vxlan_port            VxLAN port to be added or deleted
1742 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
1743 *                              OCTNET_CMD_VXLAN_PORT_DEL
1744 * @returns                     SUCCESS or FAILURE
1745 */
1746static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1747                                       u16 vxlan_port, u8 vxlan_cmd_bit)
1748{
1749        struct lio *lio = GET_LIO(netdev);
1750        struct octeon_device *oct = lio->oct_dev;
1751        struct octnic_ctrl_pkt nctrl;
1752        int ret = 0;
1753
1754        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1755
1756        nctrl.ncmd.u64 = 0;
1757        nctrl.ncmd.s.cmd = command;
1758        nctrl.ncmd.s.more = vxlan_cmd_bit;
1759        nctrl.ncmd.s.param1 = vxlan_port;
1760        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1761        nctrl.wait_time = 100;
1762        nctrl.netpndev = (u64)netdev;
1763        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1764
1765        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1766        if (ret < 0) {
1767                dev_err(&oct->pci_dev->dev,
1768                        "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1769                        ret);
1770        }
1771        return ret;
1772}
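
    /* The helpers above share one fire-and-forget control pattern: zero an
     * octnic_ctrl_pkt, fill in the firmware opcode and parameters, queue it
     * on IQ 0 via octnet_send_nic_ctrl_pkt(), and let the registered
     * liquidio_link_ctrl_cmd_completion callback report the outcome (the
     * VLAN-add path is the exception, as it also blocks on a completion).
     */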
1773
1774/** \brief Net device fix features
1775 * @param netdev  pointer to network device
1776 * @param request features requested
1777 * @returns updated features list
1778 */
1779static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1780                                               netdev_features_t request)
1781{
1782        struct lio *lio = netdev_priv(netdev);
1783
1784        if ((request & NETIF_F_RXCSUM) &&
1785            !(lio->dev_capability & NETIF_F_RXCSUM))
1786                request &= ~NETIF_F_RXCSUM;
1787
1788        if ((request & NETIF_F_HW_CSUM) &&
1789            !(lio->dev_capability & NETIF_F_HW_CSUM))
1790                request &= ~NETIF_F_HW_CSUM;
1791
1792        if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1793                request &= ~NETIF_F_TSO;
1794
1795        if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1796                request &= ~NETIF_F_TSO6;
1797
1798        if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1799                request &= ~NETIF_F_LRO;
1800
1801        /* Disable LRO if RXCSUM is off: LRO-coalesced frames rely on
             * hardware receive checksum validation.
             */
1802        if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1803            (lio->dev_capability & NETIF_F_LRO))
1804                request &= ~NETIF_F_LRO;
1805
1806        return request;
1807}
1808
1809/** \brief Net device set features
1810 * @param netdev  pointer to network device
1811 * @param features features to enable/disable
1812 */
1813static int liquidio_set_features(struct net_device *netdev,
1814                                 netdev_features_t features)
1815{
1816        struct lio *lio = netdev_priv(netdev);
1817
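            /* (netdev->features ^ features) is the set of changed bits; we
             * bail out early unless LRO is among them, so the RXCSUM
             * branches below run only alongside an LRO transition.
             */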
1818        if (!((netdev->features ^ features) & NETIF_F_LRO))
1819                return 0;
1820
1821        if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1822                liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1823                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1824        else if (!(features & NETIF_F_LRO) &&
1825                 (lio->dev_capability & NETIF_F_LRO))
1826                liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1827                                     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1828        if (!(netdev->features & NETIF_F_RXCSUM) &&
1829            (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1830            (features & NETIF_F_RXCSUM))
1831                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1832                                            OCTNET_CMD_RXCSUM_ENABLE);
1833        else if ((netdev->features & NETIF_F_RXCSUM) &&
1834                 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1835                 !(features & NETIF_F_RXCSUM))
1836                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1837                                            OCTNET_CMD_RXCSUM_DISABLE);
1838
1839        return 0;
1840}
1841
1842static void liquidio_add_vxlan_port(struct net_device *netdev,
1843                                    struct udp_tunnel_info *ti)
1844{
1845        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
1846                return;
1847
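            /* ti->port arrives in network byte order; it is byte-swapped
             * below before being placed in the command's param1 field
             * (htons performs the same swap that ntohs would here).
             */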
1848        liquidio_vxlan_port_command(netdev,
1849                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
1850                                    htons(ti->port),
1851                                    OCTNET_CMD_VXLAN_PORT_ADD);
1852}
1853
1854static void liquidio_del_vxlan_port(struct net_device *netdev,
1855                                    struct udp_tunnel_info *ti)
1856{
1857        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
1858                return;
1859
1860        liquidio_vxlan_port_command(netdev,
1861                                    OCTNET_CMD_VXLAN_PORT_CONFIG,
1862                                    htons(ti->port),
1863                                    OCTNET_CMD_VXLAN_PORT_DEL);
1864}
1865
1866static const struct net_device_ops lionetdevops = {
1867        .ndo_open               = liquidio_open,
1868        .ndo_stop               = liquidio_stop,
1869        .ndo_start_xmit         = liquidio_xmit,
1870        .ndo_get_stats64        = liquidio_get_stats64,
1871        .ndo_set_mac_address    = liquidio_set_mac,
1872        .ndo_set_rx_mode        = liquidio_set_mcast_list,
1873        .ndo_tx_timeout         = liquidio_tx_timeout,
1874        .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
1875        .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
1876        .ndo_change_mtu         = liquidio_change_mtu,
1877        .ndo_do_ioctl           = liquidio_ioctl,
1878        .ndo_fix_features       = liquidio_fix_features,
1879        .ndo_set_features       = liquidio_set_features,
1880        .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
1881        .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
1882};
1883
1884static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1885{
1886        struct octeon_device *oct = (struct octeon_device *)buf;
1887        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1888        union oct_link_status *ls;
1889        int gmxport = 0;
1890        int i;
1891
1892        if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1893                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1894                        recv_pkt->buffer_size[0],
1895                        recv_pkt->rh.r_nic_info.gmxport);
1896                goto nic_info_err;
1897        }
1898
1899        gmxport = recv_pkt->rh.r_nic_info.gmxport;
1900        ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1901                OCT_DROQ_INFO_SIZE);
1902
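            /* The link-status block is written by the big-endian NIC
             * firmware; swap each 64-bit word into host order before use.
             */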
1903        octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1904
1905        for (i = 0; i < oct->ifcount; i++) {
1906                if (oct->props[i].gmxport == gmxport) {
1907                        update_link_status(oct->props[i].netdev, ls);
1908                        break;
1909                }
1910        }
1911
1912nic_info_err:
1913        for (i = 0; i < recv_pkt->buffer_count; i++)
1914                recv_buffer_free(recv_pkt->buffer_ptr[i]);
1915        octeon_free_recv_info(recv_info);
1916        return 0;
1917}
1918
1919/**
1920 * \brief Setup network interfaces
1921 * @param octeon_dev  octeon device
1922 *
1923 * Called during init time for each device. It assumes the NIC
1924 * is already up and running.  The link information for each
1925 * interface is passed in link_info.
1926 */
1927static int setup_nic_devices(struct octeon_device *octeon_dev)
1928{
1929        int retval, num_iqueues, num_oqueues;
1930        struct liquidio_if_cfg_context *ctx;
1931        u32 resp_size, ctx_size, data_size;
1932        struct liquidio_if_cfg_resp *resp;
1933        struct octeon_soft_command *sc;
1934        union oct_nic_if_cfg if_cfg;
1935        struct octdev_props *props;
1936        struct net_device *netdev;
1937        struct lio_version *vdata;
1938        struct lio *lio = NULL;
1939        u8 mac[ETH_ALEN], i, j;
1940        u32 ifidx_or_pfnum;
1941
1942        ifidx_or_pfnum = octeon_dev->pf_num;
1943
1944        /* This is to handle link status changes */
1945        octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1946                                    lio_nic_info, octeon_dev);
1947
1948        /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1949         * They are handled directly.
1950         */
1951        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1952                                        free_netbuf);
1953
1954        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1955                                        free_netsgbuf);
1956
1957        octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1958                                        free_netsgbuf_with_resp);
1959
1960        for (i = 0; i < octeon_dev->ifcount; i++) {
1961                resp_size = sizeof(struct liquidio_if_cfg_resp);
1962                ctx_size = sizeof(struct liquidio_if_cfg_context);
1963                data_size = sizeof(struct lio_version);
1964                sc = (struct octeon_soft_command *)
1965                        octeon_alloc_soft_command(octeon_dev, data_size,
1966                                                  resp_size, ctx_size);
                    if (!sc) {
                            dev_err(&octeon_dev->pci_dev->dev,
                                    "Failed to allocate soft command\n");
                            /* nothing else allocated yet for this ifidx */
                            goto setup_nic_wait_intr;
                    }
1967                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1968                ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1969                vdata = (struct lio_version *)sc->virtdptr;
1970
1971                *((u64 *)vdata) = 0;
1972                vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1973                vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1974                vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1975
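                    /* ctx->cond starts cleared and is set by
                     * lio_if_cfg_callback() when the firmware response
                     * arrives; sleep_cond() below waits on ctx->wc for it.
                     */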
1976                WRITE_ONCE(ctx->cond, 0);
1977                ctx->octeon_id = lio_get_device_id(octeon_dev);
1978                init_waitqueue_head(&ctx->wc);
1979
1980                if_cfg.u64 = 0;
1981
1982                if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1983                if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1984                if_cfg.s.base_queue = 0;
1985
1986                sc->iq_no = 0;
1987
1988                octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1989                                            OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1990                                            0);
1991
1992                sc->callback = lio_if_cfg_callback;
1993                sc->callback_arg = sc;
1994                sc->wait_time = 5000;
1995
1996                retval = octeon_send_soft_command(octeon_dev, sc);
1997                if (retval == IQ_SEND_FAILED) {
1998                        dev_err(&octeon_dev->pci_dev->dev,
1999                                "iq/oq config failed status: %x\n", retval);
2000                        /* Soft instr is freed by driver in case of failure. */
2001                        goto setup_nic_dev_fail;
2002                }
2003
2004                /* Sleep on a wait queue till the cond flag indicates that the
2005                 * response arrived or the wait timed out.
2006                 */
2007                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
2008                        dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
2009                        goto setup_nic_wait_intr;
2010                }
2011
2012                retval = resp->status;
2013                if (retval) {
2014                        dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
2015                        goto setup_nic_dev_fail;
2016                }
2017
2018                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2019                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
2020
2021                num_iqueues = hweight64(resp->cfg_info.iqmask);
2022                num_oqueues = hweight64(resp->cfg_info.oqmask);
2023
2024                if (!num_iqueues || !num_oqueues) {
2025                        dev_err(&octeon_dev->pci_dev->dev,
2026                                "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2027                                resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2028                        goto setup_nic_dev_fail;
2029                }
2030                dev_dbg(&octeon_dev->pci_dev->dev,
2031                        "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2032                        i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2033                        num_iqueues, num_oqueues);
2034
2035                netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2036
2037                if (!netdev) {
2038                        dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2039                        goto setup_nic_dev_fail;
2040                }
2041
2042                SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2043
2044                /* Associate the routines that will handle different
2045                 * netdev tasks.
2046                 */
2047                netdev->netdev_ops = &lionetdevops;
2048
2049                lio = GET_LIO(netdev);
2050
2051                memset(lio, 0, sizeof(struct lio));
2052
2053                lio->ifidx = ifidx_or_pfnum;
2054
2055                props = &octeon_dev->props[i];
2056                props->gmxport = resp->cfg_info.linfo.gmxport;
2057                props->netdev = netdev;
2058
2059                lio->linfo.num_rxpciq = num_oqueues;
2060                lio->linfo.num_txpciq = num_iqueues;
2061
2062                for (j = 0; j < num_oqueues; j++) {
2063                        lio->linfo.rxpciq[j].u64 =
2064                            resp->cfg_info.linfo.rxpciq[j].u64;
2065                }
2066                for (j = 0; j < num_iqueues; j++) {
2067                        lio->linfo.txpciq[j].u64 =
2068                            resp->cfg_info.linfo.txpciq[j].u64;
2069                }
2070
2071                lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2072                lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2073                lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2074                lio->linfo.macaddr_is_admin_asgnd =
2075                        resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2076
2077                lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2078
2079                lio->dev_capability = NETIF_F_HIGHDMA
2080                                      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2081                                      | NETIF_F_SG | NETIF_F_RXCSUM
2082                                      | NETIF_F_TSO | NETIF_F_TSO6
2083                                      | NETIF_F_GRO
2084                                      | NETIF_F_LRO;
2085                netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2086
2087                /* Copy of transmit encapsulation capabilities:
2088                 * TSO, TSO6, Checksums for this device
2089                 */
2090                lio->enc_dev_capability = NETIF_F_IP_CSUM
2091                                          | NETIF_F_IPV6_CSUM
2092                                          | NETIF_F_GSO_UDP_TUNNEL
2093                                          | NETIF_F_HW_CSUM | NETIF_F_SG
2094                                          | NETIF_F_RXCSUM
2095                                          | NETIF_F_TSO | NETIF_F_TSO6
2096                                          | NETIF_F_LRO;
2097
2098                netdev->hw_enc_features =
2099                    (lio->enc_dev_capability & ~NETIF_F_LRO);
2100                netdev->vlan_features = lio->dev_capability;
2101                /* Add any unchangeable hw features */
2102                lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2103                                       NETIF_F_HW_VLAN_CTAG_RX |
2104                                       NETIF_F_HW_VLAN_CTAG_TX;
2105
2106                netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2107
2108                netdev->hw_features = lio->dev_capability;
2109                netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2110
2111                /* MTU range: 68 - 16000 */
2112                netdev->min_mtu = LIO_MIN_MTU_SIZE;
2113                netdev->max_mtu = LIO_MAX_MTU_SIZE;
2114
2115                /* Point to the properties of the octeon device to which
2116                 * this interface belongs.
2117                 */
2118                lio->oct_dev = octeon_dev;
2119                lio->octprops = props;
2120                lio->netdev = netdev;
2121
2122                dev_dbg(&octeon_dev->pci_dev->dev,
2123                        "if%d gmx: %d hw_addr: 0x%llx\n", i,
2124                        lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2125
2126                /* The MAC sits in the low 48 bits of hw_addr; the 64-bit
                     * swap (a no-op on big-endian hosts) puts it in network
                     * byte order so it can be copied out from offset 2.
                     */
2127                octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2128                for (j = 0; j < ETH_ALEN; j++)
2129                        mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2130
2131                /* Copy MAC Address to OS network device structure */
2132                ether_addr_copy(netdev->dev_addr, mac);
2133
2134                if (liquidio_setup_io_queues(octeon_dev, i,
2135                                             lio->linfo.num_txpciq,
2136                                             lio->linfo.num_rxpciq)) {
2137                        dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2138                        goto setup_nic_dev_fail;
2139                }
2140
2141                ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2142
2143                /* For VFs, enable Octeon device interrupts here,
2144                 * as this is contingent upon IO queue setup
2145                 */
2146                octeon_dev->fn_list.enable_interrupt(octeon_dev,
2147                                                     OCTEON_ALL_INTR);
2148
2149                /* By default all interfaces on a single Octeon use the same
2150                 * tx and rx queues
2151                 */
2152                lio->txq = lio->linfo.txpciq[0].s.q_no;
2153                lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2154
2155                lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2156                lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2157
2158                if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2159                        dev_err(&octeon_dev->pci_dev->dev,
2160                                "Gather list allocation failed\n");
2161                        goto setup_nic_dev_fail;
2162                }
2163
2164                /* Register ethtool support */
2165                liquidio_set_ethtool_ops(netdev);
2166                if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2167                        octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2168                else
2169                        octeon_dev->priv_flags = 0x0;
2170
2171                if (netdev->features & NETIF_F_LRO)
2172                        liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2173                                             OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2174
2175                if (setup_link_status_change_wq(netdev))
2176                        goto setup_nic_dev_fail;
2177
2178                if (setup_rx_oom_poll_fn(netdev))
2179                        goto setup_nic_dev_fail;
2180
2181                /* Register the network device with the OS */
2182                if (register_netdev(netdev)) {
2183                        dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2184                        goto setup_nic_dev_fail;
2185                }
2186
2187                dev_dbg(&octeon_dev->pci_dev->dev,
2188                        "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2189                        i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2190                netif_carrier_off(netdev);
2191                lio->link_changes++;
2192
2193                ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2194
2195                /* Send a command to the firmware to enable Rx checksum
2196                 * offload by default at the time this Liquidio interface
2197                 * is set up
2198                 */
2199                liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2200                                            OCTNET_CMD_RXCSUM_ENABLE);
2201                liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2202                                     OCTNET_CMD_TXCSUM_ENABLE);
2203
2204                dev_dbg(&octeon_dev->pci_dev->dev,
2205                        "NIC ifidx:%d Setup successful\n", i);
2206
2207                octeon_free_soft_command(octeon_dev, sc);
2208
2209                octeon_dev->no_speed_setting = 1;
2210        }
2211
2212        return 0;
2213
2214setup_nic_dev_fail:
2215
2216        octeon_free_soft_command(octeon_dev, sc);
2217
2218setup_nic_wait_intr:
2219
2220        while (i--) {
2221                dev_err(&octeon_dev->pci_dev->dev,
2222                        "NIC ifidx:%d Setup failed\n", i);
2223                liquidio_destroy_nic_device(octeon_dev, i);
2224        }
2225        return -ENODEV;
2226}
2227
2228/**
2229 * \brief initialize the NIC
2230 * @param oct octeon device
2231 *
2232 * This initialization routine is called once the Octeon device application is
2233 * up and running
2234 */
2235static int liquidio_init_nic_module(struct octeon_device *oct)
2236{
2237        int num_nic_ports = 1;
2238        int i, retval = 0;
2239
2240        dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2241
2242        /* Only the default iq and oq were initialized; initialize the
2243         * rest as well, and run the port_config command for each port
2244         */
2245        oct->ifcount = num_nic_ports;
2246        memset(oct->props, 0,
2247               sizeof(struct octdev_props) * num_nic_ports);
2248
2249        for (i = 0; i < MAX_OCTEON_LINKS; i++)
2250                oct->props[i].gmxport = -1;
2251
2252        retval = setup_nic_devices(oct);
2253        if (retval) {
2254                dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2255                goto octnet_init_failure;
2256        }
2257
2258        dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2259
2260        return retval;
2261
2262octnet_init_failure:
2263
2264        oct->ifcount = 0;
2265
2266        return retval;
2267}
2268
2269/**
2270 * \brief Device initialization for each Octeon device that is probed
2271 * @param oct  octeon device
2272 */
2273static int octeon_device_init(struct octeon_device *oct)
2274{
2275        u32 rev_id;
2276        int j;
2277
2278        atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2279
2280        /* Enable access to the octeon device and make its DMA capability
2281         * known to the OS.
2282         */
2283        if (octeon_pci_os_setup(oct))
2284                return 1;
2285        atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2286
2287        oct->chip_id = OCTEON_CN23XX_VF_VID;
2288        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2289        oct->rev_id = rev_id & 0xff;
2290
2291        if (cn23xx_setup_octeon_vf_device(oct))
2292                return 1;
2293
2294        atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2295
2296        oct->app_mode = CVM_DRV_NIC_APP;
2297
2298        /* Initialize the dispatch mechanism used to push packets arriving on
2299         * Octeon Output queues.
2300         */
2301        if (octeon_init_dispatch_list(oct))
2302                return 1;
2303
2304        atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2305
2306        if (octeon_set_io_queues_off(oct)) {
2307                dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2308                return 1;
2309        }
2310
2311        if (oct->fn_list.setup_device_regs(oct)) {
2312                dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2313                return 1;
2314        }
2315
2316        /* Initialize soft command buffer pool */
2317        if (octeon_setup_sc_buffer_pool(oct)) {
2318                dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2319                return 1;
2320        }
2321        atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2322
2323        /* Setup the data structures that manage this Octeon's Input queues. */
2324        if (octeon_setup_instr_queues(oct)) {
2325                dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2326                return 1;
2327        }
2328        atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2329
2330        /* Initialize lists to manage the requests of different types that
2331         * arrive from user & kernel applications for this octeon device.
2332         */
2333        if (octeon_setup_response_list(oct)) {
2334                dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2335                return 1;
2336        }
2337        atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2338
2339        if (octeon_setup_output_queues(oct)) {
2340                dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2341                return 1;
2342        }
2343        atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2344
2345        if (oct->fn_list.setup_mbox(oct)) {
2346                dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2347                return 1;
2348        }
2349        atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2350
2351        if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2352                dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2353                return 1;
2354        }
2355        atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2356
2357        dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
2358                 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
2359
2360        /* Setup the interrupt handler and record the INT SUM register address */
2361        if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2362                return 1;
2363
2364        atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2365
2366        /* ***************************************************************
2367         * The interrupts need to be enabled for the PF<-->VF handshake.
2368         * They are [re]-enabled after the PF<-->VF handshake so that the
2369         * correct OQ tick value is used (i.e. the value retrieved from
2370         * the PF as part of the handshake).
2371         */
2372
2373        /* Enable Octeon device interrupts */
2374        oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2375
2376        if (cn23xx_octeon_pfvf_handshake(oct))
2377                return 1;
2378
2379        /* Here we [re]-enable the interrupts so that the correct OQ tick value
2380         * is used (i.e. the value that was retrieved during the handshake)
2381         */
2382
2383        /* Enable Octeon device interrupts */
2384        oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2385        /* *************************************************************** */
2386
2387        /* Enable the input and output queues for this Octeon device */
2388        if (oct->fn_list.enable_io_queues(oct)) {
2389                dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2390                return 1;
2391        }
2392
2393        atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2394
2395        atomic_set(&oct->status, OCT_DEV_HOST_OK);
2396
2397        /* Send Credit for Octeon Output queues. Credits are always sent after
2398         * the output queue is enabled.
2399         */
2400        for (j = 0; j < oct->num_oqs; j++)
2401                writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2402
2403        /* Packets can start arriving on the output queues from this point. */
2404
2405        atomic_set(&oct->status, OCT_DEV_CORE_OK);
2406
2407        atomic_set(&oct->status, OCT_DEV_RUNNING);
2408
2409        if (liquidio_init_nic_module(oct))
2410                return 1;
2411
2412        return 0;
2413}
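
    /* For reference, octeon_device_init() advances oct->status through
     * (OCT_DEV_ prefixes omitted): BEGIN -> PCI_ENABLE_DONE -> PCI_MAP_DONE ->
     * DISPATCH_INIT_DONE -> SC_BUFF_POOL_INIT_DONE -> INSTR_QUEUE_INIT_DONE ->
     * RESP_LIST_INIT_DONE -> DROQ_INIT_DONE -> MBOX_SETUP_DONE ->
     * MSIX_ALLOC_VECTOR_DONE -> INTR_SET_DONE -> IO_QUEUES_DONE -> HOST_OK ->
     * CORE_OK -> RUNNING.
     */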
2414
2415static int __init liquidio_vf_init(void)
2416{
2417        octeon_init_device_list(0);
2418        return pci_register_driver(&liquidio_vf_pci_driver);
2419}
2420
2421static void __exit liquidio_vf_exit(void)
2422{
2423        pci_unregister_driver(&liquidio_vf_pci_driver);
2424
2425        pr_info("LiquidIO_VF network module is now unloaded\n");
2426}
2427
2428module_init(liquidio_vf_init);
2429module_exit(liquidio_vf_exit);
2430