linux/drivers/net/ethernet/cavium/liquidio/lio_core.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG  MAX_SKB_FRAGS

/**
 * lio_delete_glists - Delete gather lists
 * @lio: per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        kfree(lio->glist_lock);
        lio->glist_lock = NULL;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->oct_dev->num_iqs; i++) {
                do {
                        g = (struct octnic_gather *)
                            lio_list_delete_head(&lio->glist[i]);
                        kfree(g);
                } while (g);

                if (lio->glists_virt_base && lio->glists_virt_base[i] &&
                    lio->glists_dma_base && lio->glists_dma_base[i]) {
                        lio_dma_free(lio->oct_dev,
                                     lio->glist_entry_size * lio->tx_qsize,
                                     lio->glists_virt_base[i],
                                     lio->glists_dma_base[i]);
                }
        }

        kfree(lio->glists_virt_base);
        lio->glists_virt_base = NULL;

        kfree(lio->glists_dma_base);
        lio->glists_dma_base = NULL;

        kfree(lio->glist);
        lio->glist = NULL;
}

/**
 * lio_setup_glists - Setup gather lists
 * @oct: octeon_device
 * @lio: per-network private data
 * @num_iqs: count of iqs to allocate
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        struct octnic_gather *g;
        int i, j;

        lio->glist_lock =
            kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
        if (!lio->glist_lock)
                return -ENOMEM;

        lio->glist =
            kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
        if (!lio->glist) {
                kfree(lio->glist_lock);
                lio->glist_lock = NULL;
                return -ENOMEM;
        }

        lio->glist_entry_size =
                ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

        /* allocate memory to store the virtual and dma base addresses of
         * the per-glist consistent memory
         */
        lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                        GFP_KERNEL);
        lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                       GFP_KERNEL);

        if (!lio->glists_virt_base || !lio->glists_dma_base) {
                lio_delete_glists(lio);
                return -ENOMEM;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = dev_to_node(&oct->pci_dev->dev);

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                lio->glists_virt_base[i] =
                        lio_dma_alloc(oct,
                                      lio->glist_entry_size * lio->tx_qsize,
                                      &lio->glists_dma_base[i]);

                if (!lio->glists_virt_base[i]) {
                        lio_delete_glists(lio);
                        return -ENOMEM;
                }

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg = lio->glists_virt_base[i] +
                                (j * lio->glist_entry_size);

                        g->sg_dma_ptr = lio->glists_dma_base[i] +
                                        (j * lio->glist_entry_size);

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        lio_delete_glists(lio);
                        return -ENOMEM;
                }
        }

        return 0;
}

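/**
 * liquidio_set_feature - send a feature control command to the firmware
 * @netdev: network device
 * @cmd: feature command (OCTNET_CMD_*)
 * @param1: command-specific parameter
 *
 * Returns 0 on success or a negative errno on failure.
 */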
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = cmd;
        nctrl.ncmd.s.param1 = param1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret) {
                dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
                        ret);
                if (ret > 0)
                        ret = -EIO;
        }
        return ret;
}

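/**
 * octeon_report_tx_completion_to_bql - report completed transmits to BQL
 * @txq: netdev_queue the packets were sent on
 * @pkts_compl: number of completed packets
 * @bytes_compl: number of completed bytes
 */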
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
                                        unsigned int bytes_compl)
{
        struct netdev_queue *netdev_queue = txq;

        netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

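/**
 * octeon_update_tx_completion_counters - tally a completed transmit buffer
 * @buf: completed buffer (free info or soft command, depending on @reqtype)
 * @reqtype: request type the buffer was queued with (REQTYPE_*)
 * @pkts_compl: incremented by one for the completed skb
 * @bytes_compl: incremented by the completed skb's length
 */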
void octeon_update_tx_completion_counters(void *buf, int reqtype,
                                          unsigned int *pkts_compl,
                                          unsigned int *bytes_compl)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb = NULL;
        struct octeon_soft_command *sc;

        switch (reqtype) {
        case REQTYPE_NORESP_NET:
        case REQTYPE_NORESP_NET_SG:
                finfo = buf;
                skb = finfo->skb;
                break;

        case REQTYPE_RESP_NET_SG:
        case REQTYPE_RESP_NET:
                sc = buf;
                skb = sc->callback_arg;
                break;

        default:
                return;
        }

        (*pkts_compl)++;
        *bytes_compl += skb->len;
}

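/**
 * octeon_report_sent_bytes_to_bql - report a queued transmit to BQL
 * @buf: queued buffer (free info or soft command, depending on @reqtype)
 * @reqtype: request type the buffer was queued with (REQTYPE_*)
 *
 * Returns non-zero if the corresponding transmit queue is stopped.
 */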
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct octeon_soft_command *sc;
        struct netdev_queue *txq;

        switch (reqtype) {
        case REQTYPE_NORESP_NET:
        case REQTYPE_NORESP_NET_SG:
                finfo = buf;
                skb = finfo->skb;
                break;

        case REQTYPE_RESP_NET_SG:
        case REQTYPE_RESP_NET:
                sc = buf;
                skb = sc->callback_arg;
                break;

        default:
                return 0;
        }

        txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
        netdev_tx_sent_queue(txq, skb->len);

        return netif_xmit_stopped(txq);
}

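/**
 * liquidio_link_ctrl_cmd_completion - completion callback for link control commands
 * @nctrl_ptr: pointer to the control packet that completed
 *
 * Logs the outcome of the command that was sent to the firmware.
 */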
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
        struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
        struct net_device *netdev = (struct net_device *)nctrl->netpndev;
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        u8 *mac;

        if (nctrl->sc_status)
                return;

        switch (nctrl->ncmd.s.cmd) {
        case OCTNET_CMD_CHANGE_DEVFLAGS:
        case OCTNET_CMD_SET_MULTI_LIST:
        case OCTNET_CMD_SET_UC_LIST:
                break;

        case OCTNET_CMD_CHANGE_MACADDR:
                mac = ((u8 *)&nctrl->udd[0]) + 2;
                if (nctrl->ncmd.s.param1) {
                        /* vfidx is 0 based, but vf_num (param1) is 1 based */
                        int vfidx = nctrl->ncmd.s.param1 - 1;
                        bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

                        if (mac_is_admin_assigned)
                                netif_info(lio, probe, lio->netdev,
                                           "MAC Address %pM is configured for VF %d\n",
                                           mac, vfidx);
                } else {
                        netif_info(lio, probe, lio->netdev,
                                   " MACAddr changed to %pM\n",
                                   mac);
                }
                break;

        case OCTNET_CMD_GPIO_ACCESS:
                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

                break;

        case OCTNET_CMD_ID_ACTIVE:
                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

                break;

        case OCTNET_CMD_LRO_ENABLE:
                dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
                break;

        case OCTNET_CMD_LRO_DISABLE:
                dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VERBOSE_ENABLE:
                dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VERBOSE_DISABLE:
                dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VLAN_FILTER_CTL:
                if (nctrl->ncmd.s.param1)
                        dev_info(&oct->pci_dev->dev,
                                 "%s VLAN filter enabled\n", netdev->name);
                else
                        dev_info(&oct->pci_dev->dev,
                                 "%s VLAN filter disabled\n", netdev->name);
                break;

        case OCTNET_CMD_ADD_VLAN_FILTER:
                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
                         netdev->name, nctrl->ncmd.s.param1);
                break;

        case OCTNET_CMD_DEL_VLAN_FILTER:
                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
                         netdev->name, nctrl->ncmd.s.param1);
                break;

        case OCTNET_CMD_SET_SETTINGS:
                dev_info(&oct->pci_dev->dev, "%s settings changed\n",
                         netdev->name);

                break;

        /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
         * Command passed by NIC driver
         */
        case OCTNET_CMD_TNL_RX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "RX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 ==
                           OCTNET_CMD_RXCSUM_DISABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "RX Checksum Offload Disabled\n");
                }
                break;

                /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
                 * Command passed by NIC driver
                 */
        case OCTNET_CMD_TNL_TX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "TX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 ==
                           OCTNET_CMD_TXCSUM_DISABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "TX Checksum Offload Disabled\n");
                }
                break;

                /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
                 * Command passed by NIC driver
                 */
        case OCTNET_CMD_VXLAN_PORT_CONFIG:
                if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
                        netif_info(lio, probe, lio->netdev,
                                   "VxLAN Destination UDP PORT:%d ADDED\n",
                                   nctrl->ncmd.s.param1);
                } else if (nctrl->ncmd.s.more ==
                           OCTNET_CMD_VXLAN_PORT_DEL) {
                        netif_info(lio, probe, lio->netdev,
                                   "VxLAN Destination UDP PORT:%d DELETED\n",
                                   nctrl->ncmd.s.param1);
                }
                break;

        case OCTNET_CMD_SET_FLOW_CTL:
                netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
                break;

        case OCTNET_CMD_QUEUE_COUNT_CTL:
                netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
                           nctrl->ncmd.s.param1);
                break;

        default:
                dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
                        nctrl->ncmd.s.cmd);
        }
}

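/**
 * octeon_pf_changed_vf_macaddr - handle a VF MAC address change made by the PF
 * @oct: octeon device
 * @mac: new MAC address assigned to this VF
 */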
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
        bool macaddr_changed = false;
        struct net_device *netdev;
        struct lio *lio;

        rtnl_lock();

        netdev = oct->props[0].netdev;
        lio = GET_LIO(netdev);

        lio->linfo.macaddr_is_admin_asgnd = true;

        if (!ether_addr_equal(netdev->dev_addr, mac)) {
                macaddr_changed = true;
                ether_addr_copy(netdev->dev_addr, mac);
                ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
                call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
        }

        rtnl_unlock();

        if (macaddr_changed)
                dev_info(&oct->pci_dev->dev,
                         "PF changed VF's MAC address to %pM\n", mac);

        /* no need to notify the firmware of the macaddr change because
         * the PF did that already
         */
}

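/**
 * octeon_schedule_rxq_oom_work - schedule a buffer refill retry for an rx queue
 * @oct: octeon device
 * @droq: output queue whose buffer refill failed
 */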
void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
                                  struct octeon_droq *droq)
{
        struct net_device *netdev = oct->props[0].netdev;
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];

        queue_delayed_work(wq->wq, &wq->wk.work,
                           msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

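/**
 * octnet_poll_check_rxq_oom_status - retry rx buffer refill from workqueue context
 * @work: work struct embedded in the queue's cavium_wk
 */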
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        int q_no = wk->ctxul;
        struct octeon_droq *droq = oct->droq[q_no];

        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
                return;

        if (octeon_retry_droq_refill(droq))
                octeon_schedule_rxq_oom_work(oct, droq);
}

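/**
 * setup_rx_oom_poll_fn - create the per-rx-queue OOM poll workqueues
 * @netdev: network device
 *
 * Returns 0 on success or -ENOMEM if a workqueue cannot be allocated.
 */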
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct cavium_wq *wq;
        int q, q_no;

        for (q = 0; q < oct->num_oqs; q++) {
                q_no = lio->linfo.rxpciq[q].s.q_no;
                wq = &lio->rxq_status_wq[q_no];
                wq->wq = alloc_workqueue("rxq-oom-status",
                                         WQ_MEM_RECLAIM, 0);
                if (!wq->wq) {
                        dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&wq->wk.work,
                                  octnet_poll_check_rxq_oom_status);
                wq->wk.ctxptr = lio;
                wq->wk.ctxul = q_no;
        }

        return 0;
}

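/**
 * cleanup_rx_oom_poll_fn - tear down the per-rx-queue OOM poll workqueues
 * @netdev: network device
 */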
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct cavium_wq *wq;
        int q_no;

        for (q_no = 0; q_no < oct->num_oqs; q_no++) {
                wq = &lio->rxq_status_wq[q_no];
                if (wq->wq) {
                        cancel_delayed_work_sync(&wq->wk.work);
                        flush_workqueue(wq->wq);
                        destroy_workqueue(wq->wq);
                        wq->wq = NULL;
                }
        }
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
        struct net_device *netdev;
        struct lio *lio;

        netdev = oct->props[iq->ifidx].netdev;

        /* This is needed because the first IQ does not have
         * a netdev associated with it.
         */
        if (!netdev)
                return;

        lio = GET_LIO(netdev);
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
                netif_wake_subqueue(netdev, iq->q_index);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                          tx_restart, 1);
        }
}

/**
 * octeon_setup_droq - Setup output queue
 * @oct: octeon device
 * @q_no: which queue
 * @num_descs: how many descriptors
 * @desc_size: size of each descriptor
 * @app_ctx: application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
        int ret_val;

        dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
        /* droq creation and local register settings. */
        ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
        if (ret_val < 0)
                return ret_val;

        if (ret_val == 1) {
                dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
                return 0;
        }

        /* Enable the droq queues */
        octeon_set_droq_pkt_op(oct, q_no, 1);

        /* Send Credit for Octeon Output queues. Credits are always
         * sent after the output queue is enabled.
         */
        writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

        return ret_val;
}

/**
 * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
 * @octeon_id: octeon device id.
 * @skbuff:    skbuff struct to be passed to network layer.
 * @len:       size of total data received.
 * @rh:        Control header associated with the packet
 * @param:     additional control data with the packet
 * @arg:       farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 __maybe_unused octeon_id,
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
        struct net_device *netdev = (struct net_device *)arg;
        struct octeon_droq *droq =
            container_of(param, struct octeon_droq, napi);
        struct sk_buff *skb = (struct sk_buff *)skbuff;
        struct skb_shared_hwtstamps *shhwtstamps;
        struct napi_struct *napi = param;
        u16 vtag = 0;
        u32 r_dh_off;
        u64 ns;

        if (netdev) {
                struct lio *lio = GET_LIO(netdev);
                struct octeon_device *oct = lio->oct_dev;

                /* Do not proceed if the interface is not in RUNNING state. */
                if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
                        recv_buffer_free(skb);
                        droq->stats.rx_dropped++;
                        return;
                }

                skb->dev = netdev;

                skb_record_rx_queue(skb, droq->q_no);
                if (likely(len > MIN_SKB_SIZE)) {
                        struct octeon_skb_page_info *pg_info;
                        unsigned char *va;

                        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                        if (pg_info->page) {
                                /* For Paged allocation use the frags */
                                va = page_address(pg_info->page) +
                                        pg_info->page_offset;
                                memcpy(skb->data, va, MIN_SKB_SIZE);
                                skb_put(skb, MIN_SKB_SIZE);
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                pg_info->page,
                                                pg_info->page_offset +
                                                MIN_SKB_SIZE,
                                                len - MIN_SKB_SIZE,
                                                LIO_RXBUFFER_SZ);
                        }
                } else {
                        struct octeon_skb_page_info *pg_info =
                                ((struct octeon_skb_page_info *)(skb->cb));
                        skb_copy_to_linear_data(skb, page_address(pg_info->page)
                                                + pg_info->page_offset, len);
                        skb_put(skb, len);
                        put_page(pg_info->page);
                }

                r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

                if (oct->ptp_enable) {
                        if (rh->r_dh.has_hwtstamp) {
                                /* timestamp is included from the hardware at
                                 * the beginning of the packet.
                                 */
                                if (ifstate_check
                                        (lio,
                                         LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
                                        /* Nanoseconds are in the first 64-bits
                                         * of the packet.
                                         */
                                        memcpy(&ns, (skb->data + r_dh_off),
                                               sizeof(ns));
                                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
                                        shhwtstamps = skb_hwtstamps(skb);
                                        shhwtstamps->hwtstamp =
                                                ns_to_ktime(ns +
                                                            lio->ptp_adjust);
                                }
                        }
                }

                if (rh->r_dh.has_hash) {
                        __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
                        u32 hash = be32_to_cpu(*hash_be);

                        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
                }

                skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
                skb->protocol = eth_type_trans(skb, skb->dev);

                if ((netdev->features & NETIF_F_RXCSUM) &&
                    (((rh->r_dh.encap_on) &&
                      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
                     (!(rh->r_dh.encap_on) &&
                      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
                        CNNIC_CSUM_VERIFIED))))
                        /* checksum has already been verified */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                /* Setting Encapsulation field on basis of status received
                 * from the firmware
                 */
                if (rh->r_dh.encap_on) {
                        skb->encapsulation = 1;
                        skb->csum_level = 1;
                        droq->stats.rx_vxlan++;
                }

                /* inbound VLAN tag */
                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    rh->r_dh.vlan) {
                        u16 priority = rh->r_dh.priority;
                        u16 vid = rh->r_dh.vlan;

                        vtag = (priority << VLAN_PRIO_SHIFT) | vid;
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
                }

                napi_gro_receive(napi, skb);

                droq->stats.rx_bytes_received += len -
                        rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
                droq->stats.rx_pkts_received++;
        } else {
                recv_buffer_free(skb);
        }
}

/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
        struct napi_struct *napi = param;

        napi_schedule(napi);
}

/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
 * @arg: pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
        struct octeon_device *oct;
        struct octeon_droq *droq = arg;
        int this_cpu = smp_processor_id();

        oct = droq->oct_dev;

        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
            droq->cpu_id == this_cpu) {
                napi_schedule_irqoff(&droq->napi);
        } else {
                call_single_data_t *csd = &droq->csd;

                csd->func = napi_schedule_wrapper;
                csd->info = &droq->napi;
                csd->flags = 0;

                smp_call_function_single_async(droq->cpu_id, csd);
        }
}

/**
 * liquidio_napi_poll - Entry point for NAPI polling
 * @napi: NAPI structure
 * @budget: maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_instr_queue *iq;
        struct octeon_device *oct;
        struct octeon_droq *droq;
        int tx_done = 0, iq_no;
        int work_done;

        droq = container_of(napi, struct octeon_droq, napi);
        oct = droq->oct_dev;
        iq_no = droq->q_no;

        /* Handle Droq descriptors */
        work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

        /* Flush the instruction queue */
        iq = oct->instr_queue[iq_no];
        if (iq) {
                /* TODO: move this check to inside octeon_flush_iq,
                 * once check_db_timeout is removed
                 */
                if (atomic_read(&iq->instr_pending))
                        /* Process iq buffers within the budget limits */
                        tx_done = octeon_flush_iq(oct, iq, budget);
                else
                        tx_done = 1;
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return if tx_done is false.
                 */
                /* sub-queue status update */
                lio_update_txq_status(oct, iq_no);
        } else {
                dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
                        __func__, iq_no);
        }

#define MAX_REG_CNT  2000000U
        /* force enable interrupt if reg cnts are high to avoid wraparound */
        if ((work_done < budget && tx_done) ||
            (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
            (droq->pkt_count >= MAX_REG_CNT)) {
                napi_complete_done(napi, work_done);

                octeon_enable_irq(droq->oct_dev, droq->q_no);
                return 0;
        }

        return (!tx_done) ? (budget) : (work_done);
}


/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs)
{
        struct octeon_droq_ops droq_ops;
        struct net_device *netdev;
        struct octeon_droq *droq;
        struct napi_struct *napi;
        int cpu_id_modulus;
        int num_tx_descs;
        struct lio *lio;
        int retval = 0;
        int q, q_no;
        int cpu_id;

        netdev = octeon_dev->props[ifidx].netdev;

        lio = GET_LIO(netdev);

        memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

        droq_ops.fptr = liquidio_push_packet;
        droq_ops.farg = netdev;

        droq_ops.poll_mode = 1;
        droq_ops.napi_fn = liquidio_napi_drv_callback;
        cpu_id = 0;
        cpu_id_modulus = num_present_cpus();

        /* set up DROQs. */
        for (q = 0; q < num_oqs; q++) {
                q_no = lio->linfo.rxpciq[q].s.q_no;
                dev_dbg(&octeon_dev->pci_dev->dev,
                        "%s index:%d linfo.rxpciq.s.q_no:%d\n",
                        __func__, q, q_no);
                retval = octeon_setup_droq(
                    octeon_dev, q_no,
                    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
                                                lio->ifidx),
                    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
                                                   lio->ifidx),
                    NULL);
                if (retval) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "%s : Runtime DROQ(RxQ) creation failed.\n",
                                __func__);
                        return 1;
                }

                droq = octeon_dev->droq[q_no];
                napi = &droq->napi;
                dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
                        (u64)netdev, (u64)octeon_dev);
                netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

                /* designate a CPU for this droq */
                droq->cpu_id = cpu_id;
                cpu_id++;
                if (cpu_id >= cpu_id_modulus)
                        cpu_id = 0;

                octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
        }

        if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
                /* 23XX PF/VF can send/recv control messages (via the first
                 * PF/VF-owned droq) from the firmware even if the ethX
                 * interface is down, so that's why poll_mode must be off
                 * for the first droq.
                 */
                octeon_dev->droq[0]->ops.poll_mode = 0;
        }

        /* set up IQs. */
        for (q = 0; q < num_iqs; q++) {
                num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
                    octeon_get_conf(octeon_dev), lio->ifidx);
                retval = octeon_setup_iq(octeon_dev, ifidx, q,
                                         lio->linfo.txpciq[q], num_tx_descs,
                                         netdev_get_tx_queue(netdev, q));
                if (retval) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                " %s : Runtime IQ(TxQ) creation failed.\n",
                                __func__);
                        return 1;
                }

                /* XPS */
                if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
                    octeon_dev->ioq_vector) {
                        struct octeon_ioq_vector    *ioq_vector;

                        ioq_vector = &octeon_dev->ioq_vector[q];
                        netif_set_xps_queue(netdev,
                                            &ioq_vector->affinity_mask,
                                            ioq_vector->iq_index);
                }
        }

        return 0;
}

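/**
 * liquidio_schedule_msix_droq_pkt_handler - dispatch MSI-X droq interrupt work
 * @droq: output queue that raised the interrupt
 * @ret: interrupt status returned by the MSI-X interrupt handler
 */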
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
        struct octeon_device *oct = droq->oct_dev;
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;

        if (droq->ops.poll_mode) {
                droq->ops.napi_fn(droq);
        } else {
                if (ret & MSIX_PO_INT) {
                        if (OCTEON_CN23XX_VF(oct))
                                dev_err(&oct->pci_dev->dev,
                                        "should not come here should not get rx when poll mode = 0 for vf\n");
                        tasklet_schedule(&oct_priv->droq_tasklet);
                        return 1;
                }
                /* this will be flushed periodically by check iq db */
                if (ret & MSIX_PI_INT)
                        return 0;
        }

        return 0;
}

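/**
 * liquidio_msix_intr_handler - MSI-X interrupt handler for an io queue vector
 * @irq: unused
 * @dev: pointer to the octeon_ioq_vector that raised the interrupt
 */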
irqreturn_t
liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
        u64 ret;

        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

        if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
                liquidio_schedule_msix_droq_pkt_handler(droq, ret);

        return IRQ_HANDLED;
}

/**
 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
 * @oct: octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct octeon_droq *droq;
        u64 oq_no;

        if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
                for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
                     oq_no++) {
                        if (!(oct->droq_intr & BIT_ULL(oq_no)))
                                continue;

                        droq = oct->droq[oq_no];

                        if (droq->ops.poll_mode) {
                                droq->ops.napi_fn(droq);
                                oct_priv->napi_mask |= BIT_ULL(oq_no);
                        } else {
                                tasklet_schedule(&oct_priv->droq_tasklet);
                        }
                }
        }
}

/**
 * liquidio_legacy_intr_handler - Interrupt handler for octeon
 * @irq: unused
 * @dev: octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        irqreturn_t ret;

        /* Disable our interrupts for the duration of ISR */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        ret = oct->fn_list.process_interrupt_regs(oct);

        if (ret == IRQ_HANDLED)
                liquidio_schedule_droq_pkt_handlers(oct);

        /* Re-enable our interrupts */
        if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

        return ret;
}

/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
        struct msix_entry *msix_entries;
        char *queue_irq_names = NULL;
        int i, num_interrupts = 0;
        int num_alloc_ioq_vectors;
        char *aux_irq_name = NULL;
        int num_ioq_vectors;
        int irqret, err;

        if (oct->msix_on) {
                oct->num_msix_irqs = num_ioqs;
                if (OCTEON_CN23XX_PF(oct)) {
                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

                        /* one non ioq interrupt for handling
                         * sli_mac_pf_int_sum
                         */
                        oct->num_msix_irqs += 1;
                } else if (OCTEON_CN23XX_VF(oct)) {
                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
                }

                /* allocate storage for the names assigned to each irq */
                oct->irq_name_storage =
                        kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
                if (!oct->irq_name_storage) {
                        dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
                        return -ENOMEM;
                }

                queue_irq_names = oct->irq_name_storage;

                if (OCTEON_CN23XX_PF(oct))
                        aux_irq_name = &queue_irq_names
                                [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

                oct->msix_entries = kcalloc(oct->num_msix_irqs,
                                            sizeof(struct msix_entry),
                                            GFP_KERNEL);
                if (!oct->msix_entries) {
                        dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return -ENOMEM;
                }

                msix_entries = (struct msix_entry *)oct->msix_entries;

                /* Assumption is that PF MSI-X vectors run from pf_srn
                 * through trs, not from 0; if not, change this code.
                 */
                if (OCTEON_CN23XX_PF(oct)) {
                        for (i = 0; i < oct->num_msix_irqs - 1; i++)
                                msix_entries[i].entry =
                                        oct->sriov_info.pf_srn + i;

                        msix_entries[oct->num_msix_irqs - 1].entry =
                                oct->sriov_info.trs;
                } else if (OCTEON_CN23XX_VF(oct)) {
                        for (i = 0; i < oct->num_msix_irqs; i++)
                                msix_entries[i].entry = i;
                }
                num_alloc_ioq_vectors = pci_enable_msix_range(
                                                oct->pci_dev, msix_entries,
                                                oct->num_msix_irqs,
                                                oct->num_msix_irqs);
                if (num_alloc_ioq_vectors < 0) {
                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return num_alloc_ioq_vectors;
                }

                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

                num_ioq_vectors = oct->num_msix_irqs;
                /* For PF, there is one non-ioq interrupt handler */
                if (OCTEON_CN23XX_PF(oct)) {
                        num_ioq_vectors -= 1;

                        snprintf(aux_irq_name, INTRNAMSIZ,
                                 "LiquidIO%u-pf%u-aux", oct->octeon_id,
                                 oct->pf_num);
                        irqret = request_irq(
                                        msix_entries[num_ioq_vectors].vector,
                                        liquidio_legacy_intr_handler, 0,
                                        aux_irq_name, oct);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                kfree(oct->irq_name_storage);
                                oct->irq_name_storage = NULL;
                                oct->msix_entries = NULL;
                                return irqret;
                        }
                }
                for (i = 0 ; i < num_ioq_vectors ; i++) {
                        if (OCTEON_CN23XX_PF(oct))
                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
                                         INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
                                         oct->octeon_id, oct->pf_num, i);

                        if (OCTEON_CN23XX_VF(oct))
                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
                                         INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
                                         oct->octeon_id, oct->vf_num, i);

                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
                                             &queue_irq_names[IRQ_NAME_OFF(i)],
                                             &oct->ioq_vector[i]);

                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                /* Freeing the non-ioq irq vector here. */
                                free_irq(msix_entries[num_ioq_vectors].vector,
                                         oct);

                                while (i) {
                                        i--;
                                        /* clearing affinity mask. */
                                        irq_set_affinity_hint(
                                                      msix_entries[i].vector,
                                                      NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                }
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                kfree(oct->irq_name_storage);
                                oct->irq_name_storage = NULL;
                                oct->msix_entries = NULL;
                                return irqret;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
                        irq_set_affinity_hint(msix_entries[i].vector,
                                              &oct->ioq_vector[i].affinity_mask
                                              );
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
                        oct->octeon_id);
        } else {
                err = pci_enable_msi(oct->pci_dev);
                if (err)
                        dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
                                 err);
                else
                        oct->flags |= LIO_FLAG_MSI_ENABLED;

                /* allocate storage for the names assigned to the irq */
                oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
                if (!oct->irq_name_storage)
                        return -ENOMEM;

                queue_irq_names = oct->irq_name_storage;

                if (OCTEON_CN23XX_PF(oct))
                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
                                 "LiquidIO%u-pf%u-rxtx-%u",
                                 oct->octeon_id, oct->pf_num, 0);

                if (OCTEON_CN23XX_VF(oct))
                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
                                 "LiquidIO%u-vf%u-rxtx-%u",
                                 oct->octeon_id, oct->vf_num, 0);

                irqret = request_irq(oct->pci_dev->irq,
                                     liquidio_legacy_intr_handler,
                                     IRQF_SHARED,
                                     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
                if (irqret) {
                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
                                irqret);
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return irqret;
                }
        }
        return 0;
}


/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int ret = 0;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate soft command\n");
                return -ENOMEM;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
        ncmd->s.param1 = new_mtu;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
                octeon_free_soft_command(oct, sc);
                return -EINVAL;
        }
        /* Sleep on a wait queue till the cond flag indicates that the
         * response arrived or timed-out.
         */
        ret = wait_for_sc_completion_timeout(oct, sc, 0);
        if (ret)
                return ret;

        if (sc->sc_status) {
                WRITE_ONCE(sc->caller_is_done, true);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;
        lio->mtu = new_mtu;

        WRITE_ONCE(sc->caller_is_done, true);
        return 0;
}

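/**
 * lio_wait_for_clean_oq - wait for pending rx packets to drain from all output queues
 * @oct: octeon device
 *
 * Returns the number of packets still pending after the retries expire.
 */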
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
        int retry = 100, pending_pkts = 0;
        int idx;

        do {
                pending_pkts = 0;

                for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(idx)))
                                continue;
                        pending_pkts +=
                                atomic_read(&oct->droq[idx]->pkts_pending);
                }

                if (pending_pkts > 0)
                        schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pending_pkts;
}

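/**
 * octnet_nic_stats_callback - completion callback for the port stats request
 * @oct_dev: octeon device
 * @status: completion status of the soft command
 * @ptr: soft command that carried the request
 */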
1289static void
1290octnet_nic_stats_callback(struct octeon_device *oct_dev,
1291                          u32 status, void *ptr)
1292{
1293        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1294        struct oct_nic_stats_resp *resp =
1295            (struct oct_nic_stats_resp *)sc->virtrptr;
1296        struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1297        struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1298        struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1299        struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1300
1301        if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1302                octeon_swap_8B_data((u64 *)&resp->stats,
1303                                    (sizeof(struct oct_link_stats)) >> 3);
1304
1305                /* RX link-level stats */
1306                rstats->total_rcvd = rsp_rstats->total_rcvd;
1307                rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1308                rstats->total_bcst = rsp_rstats->total_bcst;
1309                rstats->total_mcst = rsp_rstats->total_mcst;
1310                rstats->runts      = rsp_rstats->runts;
1311                rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1312                /* Accounts for over/under-run of buffers */
1313                rstats->fifo_err  = rsp_rstats->fifo_err;
1314                rstats->dmac_drop = rsp_rstats->dmac_drop;
1315                rstats->fcs_err   = rsp_rstats->fcs_err;
1316                rstats->jabber_err = rsp_rstats->jabber_err;
1317                rstats->l2_err    = rsp_rstats->l2_err;
1318                rstats->frame_err = rsp_rstats->frame_err;
1319                rstats->red_drops = rsp_rstats->red_drops;
1320
1321                /* RX firmware stats */
1322                rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1323                rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1324                rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1325                rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1326                rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1327                rstats->fw_err_link = rsp_rstats->fw_err_link;
1328                rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1329                rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1330                rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1331
1332                /* Number of packets that are LROed      */
1333                rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1334                /* Number of octets that are LROed       */
1335                rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1336                /* Number of LRO packets formed          */
1337                rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1338                /* Number of times lRO of packet aborted */
1339                rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1340                rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1341                rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1342                rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1343                rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1344                /* intrmod: packet forward rate */
1345                rstats->fwd_rate = rsp_rstats->fwd_rate;
1346
1347                /* TX link-level stats */
1348                tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1349                tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1350                tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1351                tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1352                tstats->ctl_sent = rsp_tstats->ctl_sent;
1353                /* Packets sent after one collision*/
1354                tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1355                /* Packets sent after multiple collision*/
1356                tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1357                /* Packets not sent due to max collisions */
1358                tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1359                /* Packets not sent due to max deferrals */
1360                tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1361                /* Accounts for over/under-run of buffers */
1362                tstats->fifo_err = rsp_tstats->fifo_err;
1363                tstats->runts = rsp_tstats->runts;
1364                /* Total number of collisions detected */
1365                tstats->total_collisions = rsp_tstats->total_collisions;
1366
1367                /* TX firmware stats */
1368                tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1369                tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1370                tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1371                tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1372                tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1373                tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1374                tstats->fw_err_link = rsp_tstats->fw_err_link;
1375                tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1376                tstats->fw_tso = rsp_tstats->fw_tso;
1377                tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1378                tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1379                tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1380
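                /* Mark the response valid for the waiting caller */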
1381                resp->status = 1;
1382        } else {
1383                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1384                resp->status = -1;
1385        }
1386}
1387
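/**
 * lio_fetch_vf_stats - fetch VF spoof-check statistics from firmware
 * @lio: per-network private data
 *
 * Sends an OPCODE_NIC_VF_PORT_STATS soft command and warns if the
 * firmware reports any spoofed packets on this PF's VFs.
 *
 * Return: 0 on success, nonzero on failure.
 */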
1388static int lio_fetch_vf_stats(struct lio *lio)
1389{
1390        struct octeon_device *oct_dev = lio->oct_dev;
1391        struct octeon_soft_command *sc;
1392        struct oct_nic_vf_stats_resp *resp;
1393
1394        int retval;
1395
1396        /* Alloc soft command */
1397        sc = (struct octeon_soft_command *)
1398                octeon_alloc_soft_command(oct_dev,
1399                                          0,
1400                                          sizeof(struct oct_nic_vf_stats_resp),
1401                                          0);
1402
1403        if (!sc) {
1404                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1405                retval = -ENOMEM;
1406                goto lio_fetch_vf_stats_exit;
1407        }
1408
1409        resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
1410        memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
1411
1412        init_completion(&sc->complete);
1413        sc->sc_status = OCTEON_REQUEST_PENDING;
1414
1415        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1416
1417        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1418                                    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
1419
1420        retval = octeon_send_soft_command(oct_dev, sc);
1421        if (retval == IQ_SEND_FAILED) {
1422                octeon_free_soft_command(oct_dev, sc);
1423                goto lio_fetch_vf_stats_exit;
1424        }
1425
1426        retval =
1427                wait_for_sc_completion_timeout(oct_dev, sc,
1428                                               (2 * LIO_SC_MAX_TMO_MS));
1429        if (retval) {
1430                dev_err(&oct_dev->pci_dev->dev,
1431                        "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
1432                goto lio_fetch_vf_stats_exit;
1433        }
1434
1435        if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1436                octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
1437                                    (sizeof(u64)) >> 3);
1438
1439                if (resp->spoofmac_cnt != 0) {
1440                        dev_warn(&oct_dev->pci_dev->dev,
1441                                 "%llu Spoofed packets detected\n",
1442                                 resp->spoofmac_cnt);
1443                }
1444        }
1445        WRITE_ONCE(sc->caller_is_done, 1);
1446
1447lio_fetch_vf_stats_exit:
1448        return retval;
1449}
1450
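/**
 * lio_fetch_stats - periodic work handler that pulls link statistics
 * @work: work_struct embedded in lio->stats_wk
 *
 * Sends an OPCODE_NIC_PORT_STATS soft command, lets
 * octnet_nic_stats_callback() copy the response into
 * oct_dev->link_stats, and reschedules itself while the interface is
 * running. On CN23XX PFs the VF spoof-check counters are also polled
 * periodically.
 */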
1451void lio_fetch_stats(struct work_struct *work)
1452{
1453        struct cavium_wk *wk = (struct cavium_wk *)work;
1454        struct lio *lio = wk->ctxptr;
1455        struct octeon_device *oct_dev = lio->oct_dev;
1456        struct octeon_soft_command *sc;
1457        struct oct_nic_stats_resp *resp;
1458        unsigned long time_in_jiffies;
1459        int retval;
1460
1461        if (OCTEON_CN23XX_PF(oct_dev)) {
1462                /* report spoofchk every 2 seconds */
1463                if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
1464                    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
1465                    oct_dev->sriov_info.num_vfs_alloced) {
1466                        lio_fetch_vf_stats(lio);
1467                }
1468
1469                oct_dev->vfstats_poll++;
1470        }
1471
1472        /* Alloc soft command */
1473        sc = (struct octeon_soft_command *)
1474                octeon_alloc_soft_command(oct_dev,
1475                                          0,
1476                                          sizeof(struct oct_nic_stats_resp),
1477                                          0);
1478
1479        if (!sc) {
1480                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1481                goto lio_fetch_stats_exit;
1482        }
1483
1484        resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1485        memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1486
1487        init_completion(&sc->complete);
1488        sc->sc_status = OCTEON_REQUEST_PENDING;
1489
1490        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1491
1492        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1493                                    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1494
1495        retval = octeon_send_soft_command(oct_dev, sc);
1496        if (retval == IQ_SEND_FAILED) {
1497                octeon_free_soft_command(oct_dev, sc);
1498                goto lio_fetch_stats_exit;
1499        }
1500
1501        retval = wait_for_sc_completion_timeout(oct_dev, sc,
1502                                                (2 * LIO_SC_MAX_TMO_MS));
1503        if (retval) {
1504                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1505                goto lio_fetch_stats_exit;
1506        }
1507
1508        octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
1509        WRITE_ONCE(sc->caller_is_done, true);
1510
1511lio_fetch_stats_exit:
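        /* Re-arm the periodic stats poll while the interface is running */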
1512        time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
1513        if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
1514                schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
1515
1516        return;
1517}
1518
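/**
 * liquidio_set_speed - request a new link speed from the firmware
 * @lio: per-network private data
 * @speed: requested link speed
 *
 * Issues a SEAPI_CMD_SPEED_SET request via OPCODE_NIC_UBOOT_CTL and
 * updates oct->speed_setting with the speed the firmware reports back.
 * Supported only on CN23XX PF devices.
 *
 * Return: 0 on success, negative error code on failure.
 */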
1519int liquidio_set_speed(struct lio *lio, int speed)
1520{
1521        struct octeon_device *oct = lio->oct_dev;
1522        struct oct_nic_seapi_resp *resp;
1523        struct octeon_soft_command *sc;
1524        union octnet_cmd *ncmd;
1525        int retval;
1526        u32 var;
1527
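        /* Nothing to do if the requested speed is already configured */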
1528        if (oct->speed_setting == speed)
1529                return 0;
1530
1531        if (!OCTEON_CN23XX_PF(oct)) {
1532                dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1533                        __func__);
1534                return -EOPNOTSUPP;
1535        }
1536
1537        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1538                                       sizeof(struct oct_nic_seapi_resp),
1539                                       0);
1540        if (!sc)
1541                return -ENOMEM;
1542
1543        ncmd = sc->virtdptr;
1544        resp = sc->virtrptr;
1545        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1546
1547        init_completion(&sc->complete);
1548        sc->sc_status = OCTEON_REQUEST_PENDING;
1549
1550        ncmd->u64 = 0;
1551        ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1552        ncmd->s.param1 = speed;
1553
1554        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1555
1556        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1557
1558        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1559                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1560
1561        retval = octeon_send_soft_command(oct, sc);
1562        if (retval == IQ_SEND_FAILED) {
1563                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1564                octeon_free_soft_command(oct, sc);
1565                retval = -EBUSY;
1566        } else {
1567                /* Wait for response or timeout */
1568                retval = wait_for_sc_completion_timeout(oct, sc, 0);
1569                if (retval)
1570                        return retval;
1571
1572                retval = resp->status;
1573
1574                if (retval) {
1575                        dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1576                                __func__, retval);
1577                        WRITE_ONCE(sc->caller_is_done, true);
1578
1579                        return -EIO;
1580                }
1581
1582                var = be32_to_cpu((__force __be32)resp->speed);
1583                if (var != speed) {
1584                        dev_err(&oct->pci_dev->dev,
1585                                "%s: setting failed speed= %x, expect %x\n",
1586                                __func__, var, speed);
1587                }
1588
1589                oct->speed_setting = var;
1590                WRITE_ONCE(sc->caller_is_done, true);
1591        }
1592
1593        return retval;
1594}
1595
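/**
 * liquidio_get_speed - read the current link speed setting from firmware
 * @lio: per-network private data
 *
 * Issues a SEAPI_CMD_SPEED_GET request via OPCODE_NIC_UBOOT_CTL and
 * caches the result in oct->speed_setting. If the boot variables cannot
 * be read (0xffff), a default is chosen from the NIC subsystem ID.
 *
 * Return: 0 on success, negative error code on failure.
 */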
1596int liquidio_get_speed(struct lio *lio)
1597{
1598        struct octeon_device *oct = lio->oct_dev;
1599        struct oct_nic_seapi_resp *resp;
1600        struct octeon_soft_command *sc;
1601        union octnet_cmd *ncmd;
1602        int retval;
1603
1604        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1605                                       sizeof(struct oct_nic_seapi_resp),
1606                                       0);
1607        if (!sc)
1608                return -ENOMEM;
1609
1610        ncmd = sc->virtdptr;
1611        resp = sc->virtrptr;
1612        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1613
1614        init_completion(&sc->complete);
1615        sc->sc_status = OCTEON_REQUEST_PENDING;
1616
1617        ncmd->u64 = 0;
1618        ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1619
1620        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1621
1622        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1623
1624        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1625                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1626
1627        retval = octeon_send_soft_command(oct, sc);
1628        if (retval == IQ_SEND_FAILED) {
1629                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1630                octeon_free_soft_command(oct, sc);
1631                retval = -EIO;
1632        } else {
1633                retval = wait_for_sc_completion_timeout(oct, sc, 0);
1634                if (retval)
1635                        return retval;
1636
1637                retval = resp->status;
1638                if (retval) {
1639                        dev_err(&oct->pci_dev->dev,
1640                                "%s failed retval=%d\n", __func__, retval);
1641                        retval = -EIO;
1642                } else {
1643                        u32 var;
1644
1645                        var = be32_to_cpu((__force __be32)resp->speed);
1646                        oct->speed_setting = var;
1647                        if (var == 0xffff) {
1648                                /* Unable to access the boot variables;
1649                                 * fall back to a default based on the NIC type
1650                                 */
1651                                if (oct->subsystem_id ==
1652                                                OCTEON_CN2350_25GB_SUBSYS_ID ||
1653                                    oct->subsystem_id ==
1654                                                OCTEON_CN2360_25GB_SUBSYS_ID) {
1655                                        oct->no_speed_setting = 1;
1656                                        oct->speed_setting = 25;
1657                                } else {
1658                                        oct->speed_setting = 10;
1659                                }
1660                        }
1661
1662                }
1663                WRITE_ONCE(sc->caller_is_done, true);
1664        }
1665
1666        return retval;
1667}
1668
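/**
 * liquidio_set_fec - configure forward error correction on the link
 * @lio: per-network private data
 * @on_off: SEAPI_CMD_FEC_DISABLE (0) or SEAPI_CMD_FEC_RS (1)
 *
 * Issues a SEAPI_CMD_FEC_SET request via OPCODE_NIC_UBOOT_CTL. Supported
 * only on CN23XX PF devices whose boot-time link speed is 25G; a driver
 * reload is needed for a changed setting to fully take effect.
 *
 * Return: 0 on success, negative error code on failure.
 */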
1669int liquidio_set_fec(struct lio *lio, int on_off)
1670{
1671        struct oct_nic_seapi_resp *resp;
1672        struct octeon_soft_command *sc;
1673        struct octeon_device *oct;
1674        union octnet_cmd *ncmd;
1675        int retval;
1676        u32 var;
1677
1678        oct = lio->oct_dev;
1679
1680        if (oct->props[lio->ifidx].fec == on_off)
1681                return 0;
1682
1683        if (!OCTEON_CN23XX_PF(oct)) {
1684                dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
1685                        __func__);
1686                return -1;
1687        }
1688
1689        if (oct->speed_boot != 25) {
1690                dev_err(&oct->pci_dev->dev,
1691                        "Set FEC only when link speed is 25G during insmod\n");
1692                return -1;
1693        }
1694
1695        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1696                                       sizeof(struct oct_nic_seapi_resp), 0);
1697        if (!sc) {
1698                dev_err(&oct->pci_dev->dev,
1699                        "Failed to allocate soft command\n");
1700                return -ENOMEM;
1701        }
1702
1703        ncmd = sc->virtdptr;
1704        resp = sc->virtrptr;
1705        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1706
1707        init_completion(&sc->complete);
1708        sc->sc_status = OCTEON_REQUEST_PENDING;
1709
1710        ncmd->u64 = 0;
1711        ncmd->s.cmd = SEAPI_CMD_FEC_SET;
1712        ncmd->s.param1 = on_off;
1713        /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
1714
1715        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1716
1717        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1718
1719        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1720                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1721
1722        retval = octeon_send_soft_command(oct, sc);
1723        if (retval == IQ_SEND_FAILED) {
1724                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1725                octeon_free_soft_command(oct, sc);
1726                return -EIO;
1727        }
1728
1729        retval = wait_for_sc_completion_timeout(oct, sc, 0);
1730        if (retval)
1731                return -EIO;
1732
1733        var = be32_to_cpu(resp->fec_setting);
1734        resp->fec_setting = var;
1735        if (var != on_off) {
1736                dev_err(&oct->pci_dev->dev,
1737                        "Setting failed fec= %x, expect %x\n",
1738                        var, on_off);
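                /* Resync the cached FEC state with what the firmware reports */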
1739                oct->props[lio->ifidx].fec = var;
1740                if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1741                        oct->props[lio->ifidx].fec = 1;
1742                else
1743                        oct->props[lio->ifidx].fec = 0;
1744        }
1745
1746        WRITE_ONCE(sc->caller_is_done, true);
1747
1748        if (oct->props[lio->ifidx].fec !=
1749            oct->props[lio->ifidx].fec_boot) {
1750                dev_dbg(&oct->pci_dev->dev,
1751                        "Reload driver to change fec to %s\n",
1752                        oct->props[lio->ifidx].fec ? "on" : "off");
1753        }
1754
1755        return retval;
1756}
1757
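/**
 * liquidio_get_fec - read the current FEC setting from firmware
 * @lio: per-network private data
 *
 * Issues a SEAPI_CMD_FEC_GET request via OPCODE_NIC_UBOOT_CTL and caches
 * the result in oct->props[lio->ifidx].fec (1 for RS-FEC, 0 otherwise).
 *
 * Return: 0 on success, negative error code on failure.
 */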
1758int liquidio_get_fec(struct lio *lio)
1759{
1760        struct oct_nic_seapi_resp *resp;
1761        struct octeon_soft_command *sc;
1762        struct octeon_device *oct;
1763        union octnet_cmd *ncmd;
1764        int retval;
1765        u32 var;
1766
1767        oct = lio->oct_dev;
1768
1769        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1770                                       sizeof(struct oct_nic_seapi_resp), 0);
1771        if (!sc)
1772                return -ENOMEM;
1773
1774        ncmd = sc->virtdptr;
1775        resp = sc->virtrptr;
1776        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1777
1778        init_completion(&sc->complete);
1779        sc->sc_status = OCTEON_REQUEST_PENDING;
1780
1781        ncmd->u64 = 0;
1782        ncmd->s.cmd = SEAPI_CMD_FEC_GET;
1783
1784        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1785
1786        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1787
1788        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1789                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1790
1791        retval = octeon_send_soft_command(oct, sc);
1792        if (retval == IQ_SEND_FAILED) {
1793                dev_info(&oct->pci_dev->dev,
1794                         "%s: Failed to send soft command\n", __func__);
1795                octeon_free_soft_command(oct, sc);
1796                return -EIO;
1797        }
1798
1799        retval = wait_for_sc_completion_timeout(oct, sc, 0);
1800        if (retval)
1801                return retval;
1802
1803        var = be32_to_cpu(resp->fec_setting);
1804        resp->fec_setting = var;
1805        if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1806                oct->props[lio->ifidx].fec = 1;
1807        else
1808                oct->props[lio->ifidx].fec = 0;
1809
1810        WRITE_ONCE(sc->caller_is_done, true);
1811
1812        if (oct->props[lio->ifidx].fec !=
1813            oct->props[lio->ifidx].fec_boot) {
1814                dev_dbg(&oct->pci_dev->dev,
1815                        "Reload driver to change fec to %s\n",
1816                        oct->props[lio->ifidx].fec ? "on" : "off");
1817        }
1818
1819        return retval;
1820}
1821