linux/drivers/net/ethernet/cavium/liquidio/lio_core.c
   1/**********************************************************************
   2 * Author: Cavium, Inc.
   3 *
   4 * Contact: support@cavium.com
   5 *          Please include "LiquidIO" in the subject.
   6 *
   7 * Copyright (c) 2003-2016 Cavium, Inc.
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more details.
  17 ***********************************************************************/
  18#include <linux/pci.h>
  19#include <linux/if_vlan.h>
  20#include "liquidio_common.h"
  21#include "octeon_droq.h"
  22#include "octeon_iq.h"
  23#include "response_manager.h"
  24#include "octeon_device.h"
  25#include "octeon_nic.h"
  26#include "octeon_main.h"
  27#include "octeon_network.h"
  28
  29/* OOM task polling interval */
  30#define LIO_OOM_POLL_INTERVAL_MS 250
  31
  32#define OCTNIC_MAX_SG  MAX_SKB_FRAGS
  33
  34/**
  35 * \brief Delete gather lists
  36 * @param lio per-network private data
  37 */
  38void lio_delete_glists(struct lio *lio)
  39{
  40        struct octnic_gather *g;
  41        int i;
  42
  43        kfree(lio->glist_lock);
  44        lio->glist_lock = NULL;
  45
  46        if (!lio->glist)
  47                return;
  48
  49        for (i = 0; i < lio->oct_dev->num_iqs; i++) {
  50                do {
  51                        g = (struct octnic_gather *)
  52                            lio_list_delete_head(&lio->glist[i]);
  53                        kfree(g);
  54                } while (g);
  55
  56                if (lio->glists_virt_base && lio->glists_virt_base[i] &&
  57                    lio->glists_dma_base && lio->glists_dma_base[i]) {
  58                        lio_dma_free(lio->oct_dev,
  59                                     lio->glist_entry_size * lio->tx_qsize,
  60                                     lio->glists_virt_base[i],
  61                                     lio->glists_dma_base[i]);
  62                }
  63        }
  64
  65        kfree(lio->glists_virt_base);
  66        lio->glists_virt_base = NULL;
  67
  68        kfree(lio->glists_dma_base);
  69        lio->glists_dma_base = NULL;
  70
  71        kfree(lio->glist);
  72        lio->glist = NULL;
  73}
  74
  75/**
  76 * \brief Setup gather lists
  77 * @param lio per-network private data
  78 */
  79int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
  80{
  81        struct octnic_gather *g;
  82        int i, j;
  83
  84        lio->glist_lock =
  85            kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
  86        if (!lio->glist_lock)
  87                return -ENOMEM;
  88
  89        lio->glist =
  90            kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
  91        if (!lio->glist) {
  92                kfree(lio->glist_lock);
  93                lio->glist_lock = NULL;
  94                return -ENOMEM;
  95        }
  96
  97        lio->glist_entry_size =
  98                ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
  99
  100        /* allocate memory to store the virtual and DMA base addresses of
  101         * the per-glist consistent memory
  102         */
 103        lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
 104                                        GFP_KERNEL);
 105        lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
 106                                       GFP_KERNEL);
 107
 108        if (!lio->glists_virt_base || !lio->glists_dma_base) {
 109                lio_delete_glists(lio);
 110                return -ENOMEM;
 111        }
 112
 113        for (i = 0; i < num_iqs; i++) {
 114                int numa_node = dev_to_node(&oct->pci_dev->dev);
 115
 116                spin_lock_init(&lio->glist_lock[i]);
 117
 118                INIT_LIST_HEAD(&lio->glist[i]);
 119
 120                lio->glists_virt_base[i] =
 121                        lio_dma_alloc(oct,
 122                                      lio->glist_entry_size * lio->tx_qsize,
 123                                      &lio->glists_dma_base[i]);
 124
 125                if (!lio->glists_virt_base[i]) {
 126                        lio_delete_glists(lio);
 127                        return -ENOMEM;
 128                }
 129
 130                for (j = 0; j < lio->tx_qsize; j++) {
 131                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
 132                                         numa_node);
 133                        if (!g)
 134                                g = kzalloc(sizeof(*g), GFP_KERNEL);
 135                        if (!g)
 136                                break;
 137
 138                        g->sg = lio->glists_virt_base[i] +
 139                                (j * lio->glist_entry_size);
 140
 141                        g->sg_dma_ptr = lio->glists_dma_base[i] +
 142                                        (j * lio->glist_entry_size);
 143
 144                        list_add_tail(&g->list, &lio->glist[i]);
 145                }
 146
 147                if (j != lio->tx_qsize) {
 148                        lio_delete_glists(lio);
 149                        return -ENOMEM;
 150                }
 151        }
 152
 153        return 0;
 154}
 155
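/**
 * \brief Send a feature-control command to the NIC firmware
 * @param netdev network device
 * @param cmd command code (OCTNET_CMD_*)
 * @param param1 parameter for the command
 * @returns 0 on success, negative error code otherwise
 */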
 156int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 157{
 158        struct lio *lio = GET_LIO(netdev);
 159        struct octeon_device *oct = lio->oct_dev;
 160        struct octnic_ctrl_pkt nctrl;
 161        int ret = 0;
 162
 163        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 164
 165        nctrl.ncmd.u64 = 0;
 166        nctrl.ncmd.s.cmd = cmd;
 167        nctrl.ncmd.s.param1 = param1;
 168        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
 169        nctrl.netpndev = (u64)netdev;
 170        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 171
 172        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 173        if (ret) {
 174                dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
 175                        ret);
 176                if (ret > 0)
 177                        ret = -EIO;
 178        }
 179        return ret;
 180}
 181
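/**
 * \brief Report completed packets and bytes to BQL
 * @param txq netdev TX queue the packets were sent on
 * @param pkts_compl number of packets completed
 * @param bytes_compl number of bytes completed
 */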
 182void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
 183                                        unsigned int bytes_compl)
 184{
 185        struct netdev_queue *netdev_queue = txq;
 186
 187        netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
 188}
 189
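/**
 * \brief Update TX completion counters for a freed request buffer
 * @param buf request buffer (finfo or soft command, depending on reqtype)
 * @param reqtype request type (REQTYPE_*)
 * @param pkts_compl incremented by one for the completed skb
 * @param bytes_compl incremented by the completed skb's length
 */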
 190void octeon_update_tx_completion_counters(void *buf, int reqtype,
 191                                          unsigned int *pkts_compl,
 192                                          unsigned int *bytes_compl)
 193{
 194        struct octnet_buf_free_info *finfo;
 195        struct sk_buff *skb = NULL;
 196        struct octeon_soft_command *sc;
 197
 198        switch (reqtype) {
 199        case REQTYPE_NORESP_NET:
 200        case REQTYPE_NORESP_NET_SG:
 201                finfo = buf;
 202                skb = finfo->skb;
 203                break;
 204
 205        case REQTYPE_RESP_NET_SG:
 206        case REQTYPE_RESP_NET:
 207                sc = buf;
 208                skb = sc->callback_arg;
 209                break;
 210
 211        default:
 212                return;
 213        }
 214
 215        (*pkts_compl)++;
 216        *bytes_compl += skb->len;
 217}
 218
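/**
 * \brief Report a sent skb's bytes to BQL
 * @param buf request buffer (finfo or soft command, depending on reqtype)
 * @param reqtype request type (REQTYPE_*)
 * @returns non-zero if the TX queue is stopped, 0 otherwise
 */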
 219int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
 220{
 221        struct octnet_buf_free_info *finfo;
 222        struct sk_buff *skb;
 223        struct octeon_soft_command *sc;
 224        struct netdev_queue *txq;
 225
 226        switch (reqtype) {
 227        case REQTYPE_NORESP_NET:
 228        case REQTYPE_NORESP_NET_SG:
 229                finfo = buf;
 230                skb = finfo->skb;
 231                break;
 232
 233        case REQTYPE_RESP_NET_SG:
 234        case REQTYPE_RESP_NET:
 235                sc = buf;
 236                skb = sc->callback_arg;
 237                break;
 238
 239        default:
 240                return 0;
 241        }
 242
 243        txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
 244        netdev_tx_sent_queue(txq, skb->len);
 245
 246        return netif_xmit_stopped(txq);
 247}
 248
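/**
 * \brief Completion callback for link control commands
 * @param nctrl_ptr pointer to the control packet that completed
 *
 * Logs the outcome of the command that was sent to the firmware.
 */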
 249void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
 250{
 251        struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
 252        struct net_device *netdev = (struct net_device *)nctrl->netpndev;
 253        struct lio *lio = GET_LIO(netdev);
 254        struct octeon_device *oct = lio->oct_dev;
 255        u8 *mac;
 256
 257        if (nctrl->sc_status)
 258                return;
 259
 260        switch (nctrl->ncmd.s.cmd) {
 261        case OCTNET_CMD_CHANGE_DEVFLAGS:
 262        case OCTNET_CMD_SET_MULTI_LIST:
 263        case OCTNET_CMD_SET_UC_LIST:
 264                break;
 265
 266        case OCTNET_CMD_CHANGE_MACADDR:
 267                mac = ((u8 *)&nctrl->udd[0]) + 2;
 268                if (nctrl->ncmd.s.param1) {
  269                        /* vfidx is 0-based, but vf_num (param1) is 1-based */
 270                        int vfidx = nctrl->ncmd.s.param1 - 1;
 271                        bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
 272
 273                        if (mac_is_admin_assigned)
 274                                netif_info(lio, probe, lio->netdev,
 275                                           "MAC Address %pM is configured for VF %d\n",
 276                                           mac, vfidx);
 277                } else {
 278                        netif_info(lio, probe, lio->netdev,
 279                                   " MACAddr changed to %pM\n",
 280                                   mac);
 281                }
 282                break;
 283
 284        case OCTNET_CMD_GPIO_ACCESS:
 285                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
 286
 287                break;
 288
 289        case OCTNET_CMD_ID_ACTIVE:
 290                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
 291
 292                break;
 293
 294        case OCTNET_CMD_LRO_ENABLE:
 295                dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
 296                break;
 297
 298        case OCTNET_CMD_LRO_DISABLE:
 299                dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
 300                         netdev->name);
 301                break;
 302
 303        case OCTNET_CMD_VERBOSE_ENABLE:
 304                dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
 305                         netdev->name);
 306                break;
 307
 308        case OCTNET_CMD_VERBOSE_DISABLE:
 309                dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
 310                         netdev->name);
 311                break;
 312
 313        case OCTNET_CMD_VLAN_FILTER_CTL:
 314                if (nctrl->ncmd.s.param1)
 315                        dev_info(&oct->pci_dev->dev,
 316                                 "%s VLAN filter enabled\n", netdev->name);
 317                else
 318                        dev_info(&oct->pci_dev->dev,
 319                                 "%s VLAN filter disabled\n", netdev->name);
 320                break;
 321
 322        case OCTNET_CMD_ADD_VLAN_FILTER:
 323                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
 324                         netdev->name, nctrl->ncmd.s.param1);
 325                break;
 326
 327        case OCTNET_CMD_DEL_VLAN_FILTER:
 328                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
 329                         netdev->name, nctrl->ncmd.s.param1);
 330                break;
 331
 332        case OCTNET_CMD_SET_SETTINGS:
 333                dev_info(&oct->pci_dev->dev, "%s settings changed\n",
 334                         netdev->name);
 335
 336                break;
 337
  338        /* OCTNET_CMD_TNL_RX_CSUM_CTL:
  339         * RX checksum offload control command passed by the NIC driver
  340         */
 341        case OCTNET_CMD_TNL_RX_CSUM_CTL:
 342                if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
 343                        netif_info(lio, probe, lio->netdev,
 344                                   "RX Checksum Offload Enabled\n");
 345                } else if (nctrl->ncmd.s.param1 ==
 346                           OCTNET_CMD_RXCSUM_DISABLE) {
 347                        netif_info(lio, probe, lio->netdev,
 348                                   "RX Checksum Offload Disabled\n");
 349                }
 350                break;
 351
  352                /* OCTNET_CMD_TNL_TX_CSUM_CTL:
  353                 * TX checksum offload control command passed by the NIC driver
  354                 */
 355        case OCTNET_CMD_TNL_TX_CSUM_CTL:
 356                if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
 357                        netif_info(lio, probe, lio->netdev,
 358                                   "TX Checksum Offload Enabled\n");
 359                } else if (nctrl->ncmd.s.param1 ==
 360                           OCTNET_CMD_TXCSUM_DISABLE) {
 361                        netif_info(lio, probe, lio->netdev,
 362                                   "TX Checksum Offload Disabled\n");
 363                }
 364                break;
 365
  366                /* OCTNET_CMD_VXLAN_PORT_CONFIG:
  367                 * VxLAN port configuration command passed by the NIC driver
  368                 */
 369        case OCTNET_CMD_VXLAN_PORT_CONFIG:
 370                if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
 371                        netif_info(lio, probe, lio->netdev,
 372                                   "VxLAN Destination UDP PORT:%d ADDED\n",
 373                                   nctrl->ncmd.s.param1);
 374                } else if (nctrl->ncmd.s.more ==
 375                           OCTNET_CMD_VXLAN_PORT_DEL) {
 376                        netif_info(lio, probe, lio->netdev,
 377                                   "VxLAN Destination UDP PORT:%d DELETED\n",
 378                                   nctrl->ncmd.s.param1);
 379                }
 380                break;
 381
 382        case OCTNET_CMD_SET_FLOW_CTL:
 383                netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
 384                break;
 385
 386        case OCTNET_CMD_QUEUE_COUNT_CTL:
 387                netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
 388                           nctrl->ncmd.s.param1);
 389                break;
 390
 391        default:
 392                dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
 393                        nctrl->ncmd.s.cmd);
 394        }
 395}
 396
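/**
 * \brief Handle a VF MAC address change made by the PF
 * @param oct octeon device
 * @param mac MAC address assigned by the PF
 */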
 397void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
 398{
 399        bool macaddr_changed = false;
 400        struct net_device *netdev;
 401        struct lio *lio;
 402
 403        rtnl_lock();
 404
 405        netdev = oct->props[0].netdev;
 406        lio = GET_LIO(netdev);
 407
 408        lio->linfo.macaddr_is_admin_asgnd = true;
 409
 410        if (!ether_addr_equal(netdev->dev_addr, mac)) {
 411                macaddr_changed = true;
 412                ether_addr_copy(netdev->dev_addr, mac);
 413                ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
 414                call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
 415        }
 416
 417        rtnl_unlock();
 418
 419        if (macaddr_changed)
 420                dev_info(&oct->pci_dev->dev,
 421                         "PF changed VF's MAC address to %pM\n", mac);
 422
 423        /* no need to notify the firmware of the macaddr change because
 424         * the PF did that already
 425         */
 426}
 427
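/**
 * \brief Schedule delayed work to retry refilling a DROQ that ran out of buffers
 * @param oct octeon device
 * @param droq output queue to refill
 */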
 428void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
 429                                  struct octeon_droq *droq)
 430{
 431        struct net_device *netdev = oct->props[0].netdev;
 432        struct lio *lio = GET_LIO(netdev);
 433        struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
 434
 435        queue_delayed_work(wq->wq, &wq->wk.work,
 436                           msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
 437}
 438
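/**
 * \brief Delayed-work handler that retries the DROQ refill and reschedules itself if needed
 * @param work work_struct embedded in the per-queue cavium_wk
 */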
 439static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
 440{
 441        struct cavium_wk *wk = (struct cavium_wk *)work;
 442        struct lio *lio = (struct lio *)wk->ctxptr;
 443        struct octeon_device *oct = lio->oct_dev;
 444        int q_no = wk->ctxul;
 445        struct octeon_droq *droq = oct->droq[q_no];
 446
 447        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
 448                return;
 449
 450        if (octeon_retry_droq_refill(droq))
 451                octeon_schedule_rxq_oom_work(oct, droq);
 452}
 453
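/**
 * \brief Create the per-RX-queue OOM poll workqueues
 * @param netdev network device
 * @returns 0 on success, -ENOMEM if a workqueue cannot be allocated
 */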
 454int setup_rx_oom_poll_fn(struct net_device *netdev)
 455{
 456        struct lio *lio = GET_LIO(netdev);
 457        struct octeon_device *oct = lio->oct_dev;
 458        struct cavium_wq *wq;
 459        int q, q_no;
 460
 461        for (q = 0; q < oct->num_oqs; q++) {
 462                q_no = lio->linfo.rxpciq[q].s.q_no;
 463                wq = &lio->rxq_status_wq[q_no];
 464                wq->wq = alloc_workqueue("rxq-oom-status",
 465                                         WQ_MEM_RECLAIM, 0);
 466                if (!wq->wq) {
 467                        dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
 468                        return -ENOMEM;
 469                }
 470
 471                INIT_DELAYED_WORK(&wq->wk.work,
 472                                  octnet_poll_check_rxq_oom_status);
 473                wq->wk.ctxptr = lio;
 474                wq->wk.ctxul = q_no;
 475        }
 476
 477        return 0;
 478}
 479
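/**
 * \brief Cancel and destroy the per-RX-queue OOM poll workqueues
 * @param netdev network device
 */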
 480void cleanup_rx_oom_poll_fn(struct net_device *netdev)
 481{
 482        struct lio *lio = GET_LIO(netdev);
 483        struct octeon_device *oct = lio->oct_dev;
 484        struct cavium_wq *wq;
 485        int q_no;
 486
 487        for (q_no = 0; q_no < oct->num_oqs; q_no++) {
 488                wq = &lio->rxq_status_wq[q_no];
 489                if (wq->wq) {
 490                        cancel_delayed_work_sync(&wq->wk.work);
 491                        flush_workqueue(wq->wq);
 492                        destroy_workqueue(wq->wq);
 493                        wq->wq = NULL;
 494                }
 495        }
 496}
 497
 498/* Runs in interrupt context. */
 499static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
 500{
 501        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
 502        struct net_device *netdev;
 503        struct lio *lio;
 504
 505        netdev = oct->props[iq->ifidx].netdev;
 506
 507        /* This is needed because the first IQ does not have
 508         * a netdev associated with it.
 509         */
 510        if (!netdev)
 511                return;
 512
 513        lio = GET_LIO(netdev);
 514        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
 515            lio->linfo.link.s.link_up &&
 516            (!octnet_iq_is_full(oct, iq_num))) {
 517                netif_wake_subqueue(netdev, iq->q_index);
 518                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 519                                          tx_restart, 1);
 520        }
 521}
 522
 523/**
 524 * \brief Setup output queue
 525 * @param oct octeon device
 526 * @param q_no which queue
 527 * @param num_descs how many descriptors
 528 * @param desc_size size of each descriptor
 529 * @param app_ctx application context
 530 */
 531static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
 532                             int desc_size, void *app_ctx)
 533{
 534        int ret_val;
 535
 536        dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
 537        /* droq creation and local register settings. */
 538        ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
 539        if (ret_val < 0)
 540                return ret_val;
 541
 542        if (ret_val == 1) {
 543                dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
 544                return 0;
 545        }
 546
 547        /* Enable the droq queues */
 548        octeon_set_droq_pkt_op(oct, q_no, 1);
 549
 550        /* Send Credit for Octeon Output queues. Credits are always
 551         * sent after the output queue is enabled.
 552         */
 553        writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
 554
 555        return ret_val;
 556}
 557
  558/** Routine to push packets arriving on the Octeon interface up to the network layer.
  559 * @param octeon_id - octeon device id.
 560 * @param skbuff   - skbuff struct to be passed to network layer.
 561 * @param len      - size of total data received.
 562 * @param rh       - Control header associated with the packet
 563 * @param param    - additional control data with the packet
 564 * @param arg      - farg registered in droq_ops
 565 */
 566static void
 567liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 568                     void *skbuff,
 569                     u32 len,
 570                     union octeon_rh *rh,
 571                     void *param,
 572                     void *arg)
 573{
 574        struct net_device *netdev = (struct net_device *)arg;
 575        struct octeon_droq *droq =
 576            container_of(param, struct octeon_droq, napi);
 577        struct sk_buff *skb = (struct sk_buff *)skbuff;
 578        struct skb_shared_hwtstamps *shhwtstamps;
 579        struct napi_struct *napi = param;
 580        u16 vtag = 0;
 581        u32 r_dh_off;
 582        u64 ns;
 583
 584        if (netdev) {
 585                struct lio *lio = GET_LIO(netdev);
 586                struct octeon_device *oct = lio->oct_dev;
 587
 588                /* Do not proceed if the interface is not in RUNNING state. */
 589                if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
 590                        recv_buffer_free(skb);
 591                        droq->stats.rx_dropped++;
 592                        return;
 593                }
 594
 595                skb->dev = netdev;
 596
 597                skb_record_rx_queue(skb, droq->q_no);
 598                if (likely(len > MIN_SKB_SIZE)) {
 599                        struct octeon_skb_page_info *pg_info;
 600                        unsigned char *va;
 601
 602                        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
 603                        if (pg_info->page) {
 604                                /* For Paged allocation use the frags */
 605                                va = page_address(pg_info->page) +
 606                                        pg_info->page_offset;
 607                                memcpy(skb->data, va, MIN_SKB_SIZE);
 608                                skb_put(skb, MIN_SKB_SIZE);
 609                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 610                                                pg_info->page,
 611                                                pg_info->page_offset +
 612                                                MIN_SKB_SIZE,
 613                                                len - MIN_SKB_SIZE,
 614                                                LIO_RXBUFFER_SZ);
 615                        }
 616                } else {
 617                        struct octeon_skb_page_info *pg_info =
 618                                ((struct octeon_skb_page_info *)(skb->cb));
 619                        skb_copy_to_linear_data(skb, page_address(pg_info->page)
 620                                                + pg_info->page_offset, len);
 621                        skb_put(skb, len);
 622                        put_page(pg_info->page);
 623                }
 624
 625                r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
 626
 627                if (oct->ptp_enable) {
 628                        if (rh->r_dh.has_hwtstamp) {
 629                                /* timestamp is included from the hardware at
 630                                 * the beginning of the packet.
 631                                 */
 632                                if (ifstate_check
 633                                        (lio,
 634                                         LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
 635                                        /* Nanoseconds are in the first 64-bits
 636                                         * of the packet.
 637                                         */
 638                                        memcpy(&ns, (skb->data + r_dh_off),
 639                                               sizeof(ns));
 640                                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
 641                                        shhwtstamps = skb_hwtstamps(skb);
 642                                        shhwtstamps->hwtstamp =
 643                                                ns_to_ktime(ns +
 644                                                            lio->ptp_adjust);
 645                                }
 646                        }
 647                }
 648
 649                if (rh->r_dh.has_hash) {
 650                        __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
 651                        u32 hash = be32_to_cpu(*hash_be);
 652
 653                        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
 654                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
 655                }
 656
 657                skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
 658                skb->protocol = eth_type_trans(skb, skb->dev);
 659
 660                if ((netdev->features & NETIF_F_RXCSUM) &&
 661                    (((rh->r_dh.encap_on) &&
 662                      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
 663                     (!(rh->r_dh.encap_on) &&
 664                      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
 665                        CNNIC_CSUM_VERIFIED))))
 666                        /* checksum has already been verified */
 667                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 668                else
 669                        skb->ip_summed = CHECKSUM_NONE;
 670
  671                /* Set the encapsulation field based on the status received
  672                 * from the firmware
  673                 */
 674                if (rh->r_dh.encap_on) {
 675                        skb->encapsulation = 1;
 676                        skb->csum_level = 1;
 677                        droq->stats.rx_vxlan++;
 678                }
 679
 680                /* inbound VLAN tag */
 681                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 682                    rh->r_dh.vlan) {
 683                        u16 priority = rh->r_dh.priority;
 684                        u16 vid = rh->r_dh.vlan;
 685
 686                        vtag = (priority << VLAN_PRIO_SHIFT) | vid;
 687                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
 688                }
 689
 690                napi_gro_receive(napi, skb);
 691
 692                droq->stats.rx_bytes_received += len -
 693                        rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
 694                droq->stats.rx_pkts_received++;
 695        } else {
 696                recv_buffer_free(skb);
 697        }
 698}
 699
 700/**
 701 * \brief wrapper for calling napi_schedule
 702 * @param param parameters to pass to napi_schedule
 703 *
 704 * Used when scheduling on different CPUs
 705 */
 706static void napi_schedule_wrapper(void *param)
 707{
 708        struct napi_struct *napi = param;
 709
 710        napi_schedule(napi);
 711}
 712
 713/**
 714 * \brief callback when receive interrupt occurs and we are in NAPI mode
 715 * @param arg pointer to octeon output queue
 716 */
 717static void liquidio_napi_drv_callback(void *arg)
 718{
 719        struct octeon_device *oct;
 720        struct octeon_droq *droq = arg;
 721        int this_cpu = smp_processor_id();
 722
 723        oct = droq->oct_dev;
 724
 725        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
 726            droq->cpu_id == this_cpu) {
 727                napi_schedule_irqoff(&droq->napi);
 728        } else {
 729                call_single_data_t *csd = &droq->csd;
 730
 731                csd->func = napi_schedule_wrapper;
 732                csd->info = &droq->napi;
 733                csd->flags = 0;
 734
 735                smp_call_function_single_async(droq->cpu_id, csd);
 736        }
 737}
 738
 739/**
 740 * \brief Entry point for NAPI polling
 741 * @param napi NAPI structure
 742 * @param budget maximum number of items to process
 743 */
 744static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 745{
 746        struct octeon_instr_queue *iq;
 747        struct octeon_device *oct;
 748        struct octeon_droq *droq;
 749        int tx_done = 0, iq_no;
 750        int work_done;
 751
 752        droq = container_of(napi, struct octeon_droq, napi);
 753        oct = droq->oct_dev;
 754        iq_no = droq->q_no;
 755
 756        /* Handle Droq descriptors */
 757        work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
 758
 759        /* Flush the instruction queue */
 760        iq = oct->instr_queue[iq_no];
 761        if (iq) {
 762                /* TODO: move this check to inside octeon_flush_iq,
 763                 * once check_db_timeout is removed
 764                 */
 765                if (atomic_read(&iq->instr_pending))
  766                        /* Process iq buffers within the budget limit */
 767                        tx_done = octeon_flush_iq(oct, iq, budget);
 768                else
 769                        tx_done = 1;
  770                /* Update iq read-index rather than waiting for the next interrupt.
  771                 * Return if tx_done is false.
 772                 */
 773                /* sub-queue status update */
 774                lio_update_txq_status(oct, iq_no);
 775        } else {
 776                dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
 777                        __func__, iq_no);
 778        }
 779
 780#define MAX_REG_CNT  2000000U
  781        /* force-enable the interrupt if register counts are high, to avoid wraparound */
 782        if ((work_done < budget && tx_done) ||
 783            (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
 784            (droq->pkt_count >= MAX_REG_CNT)) {
 785                tx_done = 1;
 786                napi_complete_done(napi, work_done);
 787
 788                octeon_enable_irq(droq->oct_dev, droq->q_no);
 789                return 0;
 790        }
 791
 792        return (!tx_done) ? (budget) : (work_done);
 793}
 794
 795/**
 796 * \brief Setup input and output queues
 797 * @param octeon_dev octeon device
 798 * @param ifidx Interface index
 799 *
 800 * Note: Queues are with respect to the octeon device. Thus
 801 * an input queue is for egress packets, and output queues
 802 * are for ingress packets.
 803 */
 804int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
 805                             u32 num_iqs, u32 num_oqs)
 806{
 807        struct octeon_droq_ops droq_ops;
 808        struct net_device *netdev;
 809        struct octeon_droq *droq;
 810        struct napi_struct *napi;
 811        int cpu_id_modulus;
 812        int num_tx_descs;
 813        struct lio *lio;
 814        int retval = 0;
 815        int q, q_no;
 816        int cpu_id;
 817
 818        netdev = octeon_dev->props[ifidx].netdev;
 819
 820        lio = GET_LIO(netdev);
 821
 822        memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
 823
 824        droq_ops.fptr = liquidio_push_packet;
 825        droq_ops.farg = netdev;
 826
 827        droq_ops.poll_mode = 1;
 828        droq_ops.napi_fn = liquidio_napi_drv_callback;
 829        cpu_id = 0;
 830        cpu_id_modulus = num_present_cpus();
 831
 832        /* set up DROQs. */
 833        for (q = 0; q < num_oqs; q++) {
 834                q_no = lio->linfo.rxpciq[q].s.q_no;
 835                dev_dbg(&octeon_dev->pci_dev->dev,
 836                        "%s index:%d linfo.rxpciq.s.q_no:%d\n",
 837                        __func__, q, q_no);
 838                retval = octeon_setup_droq(
 839                    octeon_dev, q_no,
 840                    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
 841                                                lio->ifidx),
 842                    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
 843                                                   lio->ifidx),
 844                    NULL);
 845                if (retval) {
 846                        dev_err(&octeon_dev->pci_dev->dev,
 847                                "%s : Runtime DROQ(RxQ) creation failed.\n",
 848                                __func__);
 849                        return 1;
 850                }
 851
 852                droq = octeon_dev->droq[q_no];
 853                napi = &droq->napi;
 854                dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
 855                        (u64)netdev, (u64)octeon_dev);
 856                netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
 857
 858                /* designate a CPU for this droq */
 859                droq->cpu_id = cpu_id;
 860                cpu_id++;
 861                if (cpu_id >= cpu_id_modulus)
 862                        cpu_id = 0;
 863
 864                octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
 865        }
 866
 867        if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
 868                /* 23XX PF/VF can send/recv control messages (via the first
 869                 * PF/VF-owned droq) from the firmware even if the ethX
  870                 * interface is down, so poll_mode must be off
 871                 * for the first droq.
 872                 */
 873                octeon_dev->droq[0]->ops.poll_mode = 0;
 874        }
 875
 876        /* set up IQs. */
 877        for (q = 0; q < num_iqs; q++) {
 878                num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
 879                    octeon_get_conf(octeon_dev), lio->ifidx);
 880                retval = octeon_setup_iq(octeon_dev, ifidx, q,
 881                                         lio->linfo.txpciq[q], num_tx_descs,
 882                                         netdev_get_tx_queue(netdev, q));
 883                if (retval) {
 884                        dev_err(&octeon_dev->pci_dev->dev,
 885                                " %s : Runtime IQ(TxQ) creation failed.\n",
 886                                __func__);
 887                        return 1;
 888                }
 889
 890                /* XPS */
 891                if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
 892                    octeon_dev->ioq_vector) {
 893                        struct octeon_ioq_vector    *ioq_vector;
 894
 895                        ioq_vector = &octeon_dev->ioq_vector[q];
 896                        netif_set_xps_queue(netdev,
 897                                            &ioq_vector->affinity_mask,
 898                                            ioq_vector->iq_index);
 899                }
 900        }
 901
 902        return 0;
 903}
 904
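/**
 * \brief Schedule DROQ packet processing from an MSI-X interrupt
 * @param droq output queue that raised the interrupt
 * @param ret interrupt status returned by the MSI-X handler
 */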
 905static
 906int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
 907{
 908        struct octeon_device *oct = droq->oct_dev;
 909        struct octeon_device_priv *oct_priv =
 910            (struct octeon_device_priv *)oct->priv;
 911
 912        if (droq->ops.poll_mode) {
 913                droq->ops.napi_fn(droq);
 914        } else {
 915                if (ret & MSIX_PO_INT) {
 916                        if (OCTEON_CN23XX_VF(oct))
 917                                dev_err(&oct->pci_dev->dev,
  918                                        "unexpected RX interrupt: VF should not get RX here when poll_mode is 0\n");
 919                        tasklet_schedule(&oct_priv->droq_tasklet);
 920                        return 1;
 921                }
 922                /* this will be flushed periodically by check iq db */
 923                if (ret & MSIX_PI_INT)
 924                        return 0;
 925        }
 926
 927        return 0;
 928}
 929
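/**
 * \brief MSI-X interrupt handler for a queue pair
 * @param irq unused
 * @param dev pointer to the queue's octeon_ioq_vector
 */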
 930irqreturn_t
 931liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
 932{
 933        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
 934        struct octeon_device *oct = ioq_vector->oct_dev;
 935        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
 936        u64 ret;
 937
 938        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
 939
 940        if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
 941                liquidio_schedule_msix_droq_pkt_handler(droq, ret);
 942
 943        return IRQ_HANDLED;
 944}
 945
 946/**
  947 * \brief Droq packet processor scheduler
 948 * @param oct octeon device
 949 */
 950static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
 951{
 952        struct octeon_device_priv *oct_priv =
 953                (struct octeon_device_priv *)oct->priv;
 954        struct octeon_droq *droq;
 955        u64 oq_no;
 956
 957        if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
 958                for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
 959                     oq_no++) {
 960                        if (!(oct->droq_intr & BIT_ULL(oq_no)))
 961                                continue;
 962
 963                        droq = oct->droq[oq_no];
 964
 965                        if (droq->ops.poll_mode) {
 966                                droq->ops.napi_fn(droq);
 967                                oct_priv->napi_mask |= BIT_ULL(oq_no);
 968                        } else {
 969                                tasklet_schedule(&oct_priv->droq_tasklet);
 970                        }
 971                }
 972        }
 973}
 974
 975/**
 976 * \brief Interrupt handler for octeon
 977 * @param irq unused
 978 * @param dev octeon device
 979 */
 980static
 981irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
 982                                         void *dev)
 983{
 984        struct octeon_device *oct = (struct octeon_device *)dev;
 985        irqreturn_t ret;
 986
 987        /* Disable our interrupts for the duration of ISR */
 988        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
 989
 990        ret = oct->fn_list.process_interrupt_regs(oct);
 991
 992        if (ret == IRQ_HANDLED)
 993                liquidio_schedule_droq_pkt_handlers(oct);
 994
 995        /* Re-enable our interrupts  */
 996        if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
 997                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
 998
 999        return ret;
1000}
1001
1002/**
1003 * \brief Setup interrupt for octeon device
1004 * @param oct octeon device
1005 *
1006 *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
1007 */
1008int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
1009{
1010        struct msix_entry *msix_entries;
1011        char *queue_irq_names = NULL;
1012        int i, num_interrupts = 0;
1013        int num_alloc_ioq_vectors;
1014        char *aux_irq_name = NULL;
1015        int num_ioq_vectors;
1016        int irqret, err;
1017
1018        if (oct->msix_on) {
1019                oct->num_msix_irqs = num_ioqs;
1020                if (OCTEON_CN23XX_PF(oct)) {
1021                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
1022
 1023                        /* one non-ioq interrupt for handling
1024                         * sli_mac_pf_int_sum
1025                         */
1026                        oct->num_msix_irqs += 1;
1027                } else if (OCTEON_CN23XX_VF(oct)) {
1028                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
1029                }
1030
1031                /* allocate storage for the names assigned to each irq */
1032                oct->irq_name_storage =
1033                        kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
1034                if (!oct->irq_name_storage) {
1035                        dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1036                        return -ENOMEM;
1037                }
1038
1039                queue_irq_names = oct->irq_name_storage;
1040
1041                if (OCTEON_CN23XX_PF(oct))
1042                        aux_irq_name = &queue_irq_names
1043                                [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1044
1045                oct->msix_entries = kcalloc(oct->num_msix_irqs,
1046                                            sizeof(struct msix_entry),
1047                                            GFP_KERNEL);
1048                if (!oct->msix_entries) {
1049                        dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1050                        kfree(oct->irq_name_storage);
1051                        oct->irq_name_storage = NULL;
1052                        return -ENOMEM;
1053                }
1054
1055                msix_entries = (struct msix_entry *)oct->msix_entries;
1056
 1057                /* Assumption: PF MSI-X vector entries start at pf_srn (not 0),
 1058                 * with the last (aux) entry at trs. Update this code if that changes.
 1059                 */
1060                if (OCTEON_CN23XX_PF(oct)) {
1061                        for (i = 0; i < oct->num_msix_irqs - 1; i++)
1062                                msix_entries[i].entry =
1063                                        oct->sriov_info.pf_srn + i;
1064
1065                        msix_entries[oct->num_msix_irqs - 1].entry =
1066                                oct->sriov_info.trs;
1067                } else if (OCTEON_CN23XX_VF(oct)) {
1068                        for (i = 0; i < oct->num_msix_irqs; i++)
1069                                msix_entries[i].entry = i;
1070                }
1071                num_alloc_ioq_vectors = pci_enable_msix_range(
1072                                                oct->pci_dev, msix_entries,
1073                                                oct->num_msix_irqs,
1074                                                oct->num_msix_irqs);
1075                if (num_alloc_ioq_vectors < 0) {
1076                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1077                        kfree(oct->msix_entries);
1078                        oct->msix_entries = NULL;
1079                        kfree(oct->irq_name_storage);
1080                        oct->irq_name_storage = NULL;
1081                        return num_alloc_ioq_vectors;
1082                }
1083
1084                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1085
1086                num_ioq_vectors = oct->num_msix_irqs;
 1087                /* For the PF, there is one non-ioq interrupt handler */
1088                if (OCTEON_CN23XX_PF(oct)) {
1089                        num_ioq_vectors -= 1;
1090
1091                        snprintf(aux_irq_name, INTRNAMSIZ,
1092                                 "LiquidIO%u-pf%u-aux", oct->octeon_id,
1093                                 oct->pf_num);
1094                        irqret = request_irq(
1095                                        msix_entries[num_ioq_vectors].vector,
1096                                        liquidio_legacy_intr_handler, 0,
1097                                        aux_irq_name, oct);
1098                        if (irqret) {
1099                                dev_err(&oct->pci_dev->dev,
1100                                        "Request_irq failed for MSIX interrupt Error: %d\n",
1101                                        irqret);
1102                                pci_disable_msix(oct->pci_dev);
1103                                kfree(oct->msix_entries);
1104                                kfree(oct->irq_name_storage);
1105                                oct->irq_name_storage = NULL;
1106                                oct->msix_entries = NULL;
1107                                return irqret;
1108                        }
1109                }
1110                for (i = 0 ; i < num_ioq_vectors ; i++) {
1111                        if (OCTEON_CN23XX_PF(oct))
1112                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1113                                         INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1114                                         oct->octeon_id, oct->pf_num, i);
1115
1116                        if (OCTEON_CN23XX_VF(oct))
1117                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1118                                         INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1119                                         oct->octeon_id, oct->vf_num, i);
1120
1121                        irqret = request_irq(msix_entries[i].vector,
1122                                             liquidio_msix_intr_handler, 0,
1123                                             &queue_irq_names[IRQ_NAME_OFF(i)],
1124                                             &oct->ioq_vector[i]);
1125
1126                        if (irqret) {
1127                                dev_err(&oct->pci_dev->dev,
1128                                        "Request_irq failed for MSIX interrupt Error: %d\n",
1129                                        irqret);
 1130                                /* Free the non-ioq irq vector. */
1131                                free_irq(msix_entries[num_ioq_vectors].vector,
1132                                         oct);
1133
1134                                while (i) {
1135                                        i--;
 1136                                        /* Clear the affinity mask. */
1137                                        irq_set_affinity_hint(
1138                                                      msix_entries[i].vector,
1139                                                      NULL);
1140                                        free_irq(msix_entries[i].vector,
1141                                                 &oct->ioq_vector[i]);
1142                                }
1143                                pci_disable_msix(oct->pci_dev);
1144                                kfree(oct->msix_entries);
1145                                kfree(oct->irq_name_storage);
1146                                oct->irq_name_storage = NULL;
1147                                oct->msix_entries = NULL;
1148                                return irqret;
1149                        }
1150                        oct->ioq_vector[i].vector = msix_entries[i].vector;
1151                        /* assign the cpu mask for this msix interrupt vector */
1152                        irq_set_affinity_hint(msix_entries[i].vector,
1153                                              &oct->ioq_vector[i].affinity_mask
1154                                              );
1155                }
1156                dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1157                        oct->octeon_id);
1158        } else {
1159                err = pci_enable_msi(oct->pci_dev);
1160                if (err)
1161                        dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1162                                 err);
1163                else
1164                        oct->flags |= LIO_FLAG_MSI_ENABLED;
1165
1166                /* allocate storage for the names assigned to the irq */
1167                oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1168                if (!oct->irq_name_storage)
1169                        return -ENOMEM;
1170
1171                queue_irq_names = oct->irq_name_storage;
1172
1173                if (OCTEON_CN23XX_PF(oct))
1174                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1175                                 "LiquidIO%u-pf%u-rxtx-%u",
1176                                 oct->octeon_id, oct->pf_num, 0);
1177
1178                if (OCTEON_CN23XX_VF(oct))
1179                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1180                                 "LiquidIO%u-vf%u-rxtx-%u",
1181                                 oct->octeon_id, oct->vf_num, 0);
1182
1183                irqret = request_irq(oct->pci_dev->irq,
1184                                     liquidio_legacy_intr_handler,
1185                                     IRQF_SHARED,
1186                                     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1187                if (irqret) {
1188                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
1189                                pci_disable_msi(oct->pci_dev);
1190                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1191                                irqret);
1192                        kfree(oct->irq_name_storage);
1193                        oct->irq_name_storage = NULL;
1194                        return irqret;
1195                }
1196        }
1197        return 0;
1198}
1199
1200/**
1201 * \brief Net device change_mtu
1202 * @param netdev network device
1203 */
1204int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
1205{
1206        struct lio *lio = GET_LIO(netdev);
1207        struct octeon_device *oct = lio->oct_dev;
1208        struct octeon_soft_command *sc;
1209        union octnet_cmd *ncmd;
1210        int ret = 0;
1211
1212        sc = (struct octeon_soft_command *)
1213                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
1214        if (!sc) {
1215                netif_info(lio, rx_err, lio->netdev,
1216                           "Failed to allocate soft command\n");
1217                return -ENOMEM;
1218        }
1219
1220        ncmd = (union octnet_cmd *)sc->virtdptr;
1221
1222        init_completion(&sc->complete);
1223        sc->sc_status = OCTEON_REQUEST_PENDING;
1224
1225        ncmd->u64 = 0;
1226        ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
1227        ncmd->s.param1 = new_mtu;
1228
1229        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1230
1231        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1232
1233        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1234                                    OPCODE_NIC_CMD, 0, 0, 0);
1235
1236        ret = octeon_send_soft_command(oct, sc);
1237        if (ret == IQ_SEND_FAILED) {
1238                netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
1239                octeon_free_soft_command(oct, sc);
1240                return -EINVAL;
1241        }
 1242        /* Sleep on a wait queue until the condition flag indicates that the
 1243         * response arrived or timed out.
1244         */
1245        ret = wait_for_sc_completion_timeout(oct, sc, 0);
1246        if (ret)
1247                return ret;
1248
1249        if (sc->sc_status) {
1250                WRITE_ONCE(sc->caller_is_done, true);
1251                return -EINVAL;
1252        }
1253
1254        netdev->mtu = new_mtu;
1255        lio->mtu = new_mtu;
1256
1257        WRITE_ONCE(sc->caller_is_done, true);
1258        return 0;
1259}
1260
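/**
 * \brief Wait for pending output-queue packets to drain
 * @param oct octeon device
 * @returns number of packets still pending when the retries expire
 */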
1261int lio_wait_for_clean_oq(struct octeon_device *oct)
1262{
1263        int retry = 100, pending_pkts = 0;
1264        int idx;
1265
1266        do {
1267                pending_pkts = 0;
1268
1269                for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
1270                        if (!(oct->io_qmask.oq & BIT_ULL(idx)))
1271                                continue;
1272                        pending_pkts +=
1273                                atomic_read(&oct->droq[idx]->pkts_pending);
1274                }
1275
1276                if (pending_pkts > 0)
1277                        schedule_timeout_uninterruptible(1);
1278
1279        } while (retry-- && pending_pkts);
1280
1281        return pending_pkts;
1282}
1283
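/**
 * \brief Completion callback for the firmware port statistics request
 * @param oct_dev octeon device
 * @param status request status
 * @param ptr soft command carrying the statistics response
 */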
1284static void
1285octnet_nic_stats_callback(struct octeon_device *oct_dev,
1286                          u32 status, void *ptr)
1287{
1288        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1289        struct oct_nic_stats_resp *resp =
1290            (struct oct_nic_stats_resp *)sc->virtrptr;
1291        struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1292        struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1293        struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1294        struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1295
1296        if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1297                octeon_swap_8B_data((u64 *)&resp->stats,
1298                                    (sizeof(struct oct_link_stats)) >> 3);
1299
1300                /* RX link-level stats */
1301                rstats->total_rcvd = rsp_rstats->total_rcvd;
1302                rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1303                rstats->total_bcst = rsp_rstats->total_bcst;
1304                rstats->total_mcst = rsp_rstats->total_mcst;
1305                rstats->runts      = rsp_rstats->runts;
1306                rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1307                /* Accounts for over/under-run of buffers */
1308                rstats->fifo_err  = rsp_rstats->fifo_err;
1309                rstats->dmac_drop = rsp_rstats->dmac_drop;
1310                rstats->fcs_err   = rsp_rstats->fcs_err;
1311                rstats->jabber_err = rsp_rstats->jabber_err;
1312                rstats->l2_err    = rsp_rstats->l2_err;
1313                rstats->frame_err = rsp_rstats->frame_err;
1314                rstats->red_drops = rsp_rstats->red_drops;
1315
1316                /* RX firmware stats */
1317                rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1318                rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1319                rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1320                rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1321                rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1322                rstats->fw_err_link = rsp_rstats->fw_err_link;
1323                rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1324                rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1325                rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1326
1327                /* Number of packets that are LROed      */
1328                rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1329                /* Number of octets that are LROed       */
1330                rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1331                /* Number of LRO packets formed          */
1332                rstats->fw_total_lro = rsp_rstats->fw_total_lro;
 1333                /* Number of times LRO of a packet was aborted */
1334                rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1335                rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1336                rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1337                rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1338                rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1339                /* intrmod: packet forward rate */
1340                rstats->fwd_rate = rsp_rstats->fwd_rate;
1341
1342                /* TX link-level stats */
1343                tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1344                tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1345                tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1346                tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1347                tstats->ctl_sent = rsp_tstats->ctl_sent;
 1348                /* Packets sent after one collision */
 1349                tstats->one_collision_sent = rsp_tstats->one_collision_sent;
 1350                /* Packets sent after multiple collisions */
1351                tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1352                /* Packets not sent due to max collisions */
1353                tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1354                /* Packets not sent due to max deferrals */
1355                tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1356                /* Accounts for over/under-run of buffers */
1357                tstats->fifo_err = rsp_tstats->fifo_err;
1358                tstats->runts = rsp_tstats->runts;
1359                /* Total number of collisions detected */
1360                tstats->total_collisions = rsp_tstats->total_collisions;
1361
1362                /* TX firmware stats */
1363                tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1364                tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1365                tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1366                tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1367                tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1368                tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1369                tstats->fw_err_link = rsp_tstats->fw_err_link;
1370                tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1371                tstats->fw_tso = rsp_tstats->fw_tso;
1372                tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1373                tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1374                tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1375
1376                resp->status = 1;
1377        } else {
1378                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1379                resp->status = -1;
1380        }
1381}
1382
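/**
 * \brief Fetch VF port statistics from firmware and warn if spoofed
 *        packets were detected
 * @param lio per-network private data
 */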
1383static int lio_fetch_vf_stats(struct lio *lio)
1384{
1385        struct octeon_device *oct_dev = lio->oct_dev;
1386        struct octeon_soft_command *sc;
1387        struct oct_nic_vf_stats_resp *resp;
1388
1389        int retval;
1390
1391        /* Alloc soft command */
1392        sc = (struct octeon_soft_command *)
1393                octeon_alloc_soft_command(oct_dev,
1394                                          0,
1395                                          sizeof(struct oct_nic_vf_stats_resp),
1396                                          0);
1397
1398        if (!sc) {
1399                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1400                retval = -ENOMEM;
1401                goto lio_fetch_vf_stats_exit;
1402        }
1403
1404        resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
1405        memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
1406
1407        init_completion(&sc->complete);
1408        sc->sc_status = OCTEON_REQUEST_PENDING;
1409
1410        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1411
1412        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1413                                    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
1414
1415        retval = octeon_send_soft_command(oct_dev, sc);
1416        if (retval == IQ_SEND_FAILED) {
1417                octeon_free_soft_command(oct_dev, sc);
1418                goto lio_fetch_vf_stats_exit;
1419        }
1420
1421        retval = wait_for_sc_completion_timeout(oct_dev, sc,
1422                                                (2 * LIO_SC_MAX_TMO_MS));
1424        if (retval) {
1425                dev_err(&oct_dev->pci_dev->dev,
1426                        "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
1427                goto lio_fetch_vf_stats_exit;
1428        }
1429
1430        if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1431                octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
1432                                    (sizeof(u64)) >> 3);
1433
1434                if (resp->spoofmac_cnt != 0) {
1435                        dev_warn(&oct_dev->pci_dev->dev,
1436                                 "%llu Spoofed packets detected\n",
1437                                 resp->spoofmac_cnt);
1438                }
1439        }
1440        WRITE_ONCE(sc->caller_is_done, true);
1441
1442lio_fetch_vf_stats_exit:
1443        return retval;
1444}
1445
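/**
 * \brief Periodic work function that fetches port statistics from firmware
 * @param work work_struct embedded in lio->stats_wk
 */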
1446void lio_fetch_stats(struct work_struct *work)
1447{
1448        struct cavium_wk *wk = (struct cavium_wk *)work;
1449        struct lio *lio = wk->ctxptr;
1450        struct octeon_device *oct_dev = lio->oct_dev;
1451        struct octeon_soft_command *sc;
1452        struct oct_nic_stats_resp *resp;
1453        unsigned long time_in_jiffies;
1454        int retval;
1455
1456        if (OCTEON_CN23XX_PF(oct_dev)) {
1457                /* report spoofchk every 2 seconds */
1458                if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
1459                    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
1460                    oct_dev->sriov_info.num_vfs_alloced) {
1461                        lio_fetch_vf_stats(lio);
1462                }
1463
1464                oct_dev->vfstats_poll++;
1465        }
1466
1467        /* Alloc soft command */
1468        sc = (struct octeon_soft_command *)
1469                octeon_alloc_soft_command(oct_dev,
1470                                          0,
1471                                          sizeof(struct oct_nic_stats_resp),
1472                                          0);
1473
1474        if (!sc) {
1475                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1476                goto lio_fetch_stats_exit;
1477        }
1478
1479        resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1480        memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1481
1482        init_completion(&sc->complete);
1483        sc->sc_status = OCTEON_REQUEST_PENDING;
1484
1485        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1486
1487        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1488                                    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1489
1490        retval = octeon_send_soft_command(oct_dev, sc);
1491        if (retval == IQ_SEND_FAILED) {
1492                octeon_free_soft_command(oct_dev, sc);
1493                goto lio_fetch_stats_exit;
1494        }
1495
1496        retval = wait_for_sc_completion_timeout(oct_dev, sc,
1497                                                (2 * LIO_SC_MAX_TMO_MS));
1498        if (retval) {
1499                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1500                goto lio_fetch_stats_exit;
1501        }
1502
1503        octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
1504        WRITE_ONCE(sc->caller_is_done, true);
1505
1506lio_fetch_stats_exit:
1507        time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
1508        if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
1509                schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
1512}
1513
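/**
 * \brief Set link speed via a firmware U-Boot control command (CN23XX PF only)
 * @param lio per-network private data
 * @param speed link speed to configure, in Gbps
 */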
1514int liquidio_set_speed(struct lio *lio, int speed)
1515{
1516        struct octeon_device *oct = lio->oct_dev;
1517        struct oct_nic_seapi_resp *resp;
1518        struct octeon_soft_command *sc;
1519        union octnet_cmd *ncmd;
1520        int retval;
1521        u32 var;
1522
1523        if (oct->speed_setting == speed)
1524                return 0;
1525
1526        if (!OCTEON_CN23XX_PF(oct)) {
1527                dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1528                        __func__);
1529                return -EOPNOTSUPP;
1530        }
1531
1532        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1533                                       sizeof(struct oct_nic_seapi_resp),
1534                                       0);
1535        if (!sc)
1536                return -ENOMEM;
1537
1538        ncmd = sc->virtdptr;
1539        resp = sc->virtrptr;
1540        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1541
1542        init_completion(&sc->complete);
1543        sc->sc_status = OCTEON_REQUEST_PENDING;
1544
1545        ncmd->u64 = 0;
1546        ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1547        ncmd->s.param1 = speed;
1548
1549        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1550
1551        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1552
1553        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1554                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1555
1556        retval = octeon_send_soft_command(oct, sc);
1557        if (retval == IQ_SEND_FAILED) {
1558                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1559                octeon_free_soft_command(oct, sc);
1560                retval = -EBUSY;
1561        } else {
1562                /* Wait for response or timeout */
1563                retval = wait_for_sc_completion_timeout(oct, sc, 0);
1564                if (retval)
1565                        return retval;
1566
1567                retval = resp->status;
1568
1569                if (retval) {
1570                        dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1571                                __func__, retval);
1572                        WRITE_ONCE(sc->caller_is_done, true);
1573
1574                        return -EIO;
1575                }
1576
1577                var = be32_to_cpu((__force __be32)resp->speed);
1578                if (var != speed) {
1579                        dev_err(&oct->pci_dev->dev,
1580                                "%s: speed setting failed: got %x, expected %x\n",
1581                                __func__, var, speed);
1582                }
1583
1584                oct->speed_setting = var;
1585                WRITE_ONCE(sc->caller_is_done, true);
1586        }
1587
1588        return retval;
1589}
1590
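/**
 * \brief Read the configured link speed from firmware and cache it in
 *        oct->speed_setting
 * @param lio per-network private data
 */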
1591int liquidio_get_speed(struct lio *lio)
1592{
1593        struct octeon_device *oct = lio->oct_dev;
1594        struct oct_nic_seapi_resp *resp;
1595        struct octeon_soft_command *sc;
1596        union octnet_cmd *ncmd;
1597        int retval;
1598
1599        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1600                                       sizeof(struct oct_nic_seapi_resp),
1601                                       0);
1602        if (!sc)
1603                return -ENOMEM;
1604
1605        ncmd = sc->virtdptr;
1606        resp = sc->virtrptr;
1607        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1608
1609        init_completion(&sc->complete);
1610        sc->sc_status = OCTEON_REQUEST_PENDING;
1611
1612        ncmd->u64 = 0;
1613        ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1614
1615        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1616
1617        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1618
1619        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1620                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1621
1622        retval = octeon_send_soft_command(oct, sc);
1623        if (retval == IQ_SEND_FAILED) {
1624                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1625                octeon_free_soft_command(oct, sc);
1626                retval = -EIO;
1627        } else {
1628                retval = wait_for_sc_completion_timeout(oct, sc, 0);
1629                if (retval)
1630                        return retval;
1631
1632                retval = resp->status;
1633                if (retval) {
1634                        dev_err(&oct->pci_dev->dev,
1635                                "%s failed retval=%d\n", __func__, retval);
1636                        retval = -EIO;
1637                } else {
1638                        u32 var;
1639
1640                        var = be32_to_cpu((__force __be32)resp->speed);
1641                        oct->speed_setting = var;
1642                        if (var == 0xffff) {
1643                                /* unable to access boot variables
1644                                 * get the default value based on the NIC type
1645                                 */
1646                                if (oct->subsystem_id ==
1647                                                OCTEON_CN2350_25GB_SUBSYS_ID ||
1648                                    oct->subsystem_id ==
1649                                                OCTEON_CN2360_25GB_SUBSYS_ID) {
1650                                        oct->no_speed_setting = 1;
1651                                        oct->speed_setting = 25;
1652                                } else {
1653                                        oct->speed_setting = 10;
1654                                }
1655                        }
1657                }
1658                WRITE_ONCE(sc->caller_is_done, true);
1659        }
1660
1661        return retval;
1662}
1663
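/**
 * \brief Enable or disable FEC on the link (CN23XX PF with 25G boot speed only)
 * @param lio per-network private data
 * @param on_off SEAPI_CMD_FEC_DISABLE (0) or SEAPI_CMD_FEC_RS (1)
 */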
1664int liquidio_set_fec(struct lio *lio, int on_off)
1665{
1666        struct oct_nic_seapi_resp *resp;
1667        struct octeon_soft_command *sc;
1668        struct octeon_device *oct;
1669        union octnet_cmd *ncmd;
1670        int retval;
1671        u32 var;
1672
1673        oct = lio->oct_dev;
1674
1675        if (oct->props[lio->ifidx].fec == on_off)
1676                return 0;
1677
1678        if (!OCTEON_CN23XX_PF(oct)) {
1679                dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
1680                        __func__);
1681                return -EOPNOTSUPP;
1682        }
1683
1684        if (oct->speed_boot != 25) {
1685                dev_err(&oct->pci_dev->dev,
1686                        "FEC can only be set when link speed is 25G at module load\n");
1687                return -EOPNOTSUPP;
1688        }
1689
1690        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1691                                       sizeof(struct oct_nic_seapi_resp), 0);
1692        if (!sc) {
1693                dev_err(&oct->pci_dev->dev,
1694                        "Failed to allocate soft command\n");
1695                return -ENOMEM;
1696        }
1697
1698        ncmd = sc->virtdptr;
1699        resp = sc->virtrptr;
1700        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1701
1702        init_completion(&sc->complete);
1703        sc->sc_status = OCTEON_REQUEST_PENDING;
1704
1705        ncmd->u64 = 0;
1706        ncmd->s.cmd = SEAPI_CMD_FEC_SET;
1707        ncmd->s.param1 = on_off;
1708        /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
1709
1710        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1711
1712        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1713
1714        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1715                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1716
1717        retval = octeon_send_soft_command(oct, sc);
1718        if (retval == IQ_SEND_FAILED) {
1719                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1720                octeon_free_soft_command(oct, sc);
1721                return -EIO;
1722        }
1723
1724        retval = wait_for_sc_completion_timeout(oct, sc, 0);
1725        if (retval)
1726                return -EIO;
1727
1728        var = be32_to_cpu(resp->fec_setting);
1729        resp->fec_setting = var;
1730        if (var != on_off) {
1731                dev_err(&oct->pci_dev->dev,
1732                        "FEC setting failed: got %x, expected %x\n",
1733                        var, on_off);
1735                if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1736                        oct->props[lio->ifidx].fec = 1;
1737                else
1738                        oct->props[lio->ifidx].fec = 0;
1739        }
1740
1741        WRITE_ONCE(sc->caller_is_done, true);
1742
1743        if (oct->props[lio->ifidx].fec !=
1744            oct->props[lio->ifidx].fec_boot) {
1745                dev_dbg(&oct->pci_dev->dev,
1746                        "Reload driver to change fec to %s\n",
1747                        oct->props[lio->ifidx].fec ? "on" : "off");
1748        }
1749
1750        return retval;
1751}
1752
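/**
 * \brief Query the current FEC setting from firmware and cache it in
 *        oct->props[lio->ifidx].fec
 * @param lio per-network private data
 */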
1753int liquidio_get_fec(struct lio *lio)
1754{
1755        struct oct_nic_seapi_resp *resp;
1756        struct octeon_soft_command *sc;
1757        struct octeon_device *oct;
1758        union octnet_cmd *ncmd;
1759        int retval;
1760        u32 var;
1761
1762        oct = lio->oct_dev;
1763
1764        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1765                                       sizeof(struct oct_nic_seapi_resp), 0);
1766        if (!sc)
1767                return -ENOMEM;
1768
1769        ncmd = sc->virtdptr;
1770        resp = sc->virtrptr;
1771        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1772
1773        init_completion(&sc->complete);
1774        sc->sc_status = OCTEON_REQUEST_PENDING;
1775
1776        ncmd->u64 = 0;
1777        ncmd->s.cmd = SEAPI_CMD_FEC_GET;
1778
1779        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1780
1781        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1782
1783        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1784                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1785
1786        retval = octeon_send_soft_command(oct, sc);
1787        if (retval == IQ_SEND_FAILED) {
1788                dev_info(&oct->pci_dev->dev,
1789                         "%s: Failed to send soft command\n", __func__);
1790                octeon_free_soft_command(oct, sc);
1791                return -EIO;
1792        }
1793
1794        retval = wait_for_sc_completion_timeout(oct, sc, 0);
1795        if (retval)
1796                return retval;
1797
1798        var = be32_to_cpu(resp->fec_setting);
1799        resp->fec_setting = var;
1800        if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1801                oct->props[lio->ifidx].fec = 1;
1802        else
1803                oct->props[lio->ifidx].fec = 0;
1804
1805        WRITE_ONCE(sc->caller_is_done, true);
1806
1807        if (oct->props[lio->ifidx].fec !=
1808            oct->props[lio->ifidx].fec_boot) {
1809                dev_dbg(&oct->pci_dev->dev,
1810                        "Reload driver to change fec to %s\n",
1811                        oct->props[lio->ifidx].fec ? "on" : "off");
1812        }
1813
1814        return retval;
1815}
1816