linux/drivers/net/ethernet/qlogic/qede/qede_main.c
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

static char version[] =
        "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40              0x1634
#define CHIP_NUM_57980S_10              0x1666
#define CHIP_NUM_57980S_MF              0x1636
#define CHIP_NUM_57980S_100             0x1644
#define CHIP_NUM_57980S_50              0x1654
#define CHIP_NUM_57980S_25              0x1656
#define CHIP_NUM_57980S_IOV             0x1664
#define CHIP_NUM_AH                     0x8070
#define CHIP_NUM_AH_IOV                 0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40         CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10         CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF         CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100        CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50         CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25         CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV        CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH                CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV            CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
        QEDE_PRIVATE_PF,
        QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT              (5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
/* The qede lock is used to protect driver state changes and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
        mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
        mutex_unlock(&edev->qede_lock);
}
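
/* Usage sketch (illustrative, not upstream documentation): flows that
 * mutate driver state take the lock around both the state check and the
 * change, e.g.
 *
 *      __qede_lock(edev);
 *      if (edev->state == QEDE_STATE_OPEN)
 *              qede_config_rx_mode(edev->ndev);
 *      __qede_unlock(edev);
 *
 * as qede_sp_task() further below does.
 */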

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
                            __be16 vlan_proto)
{
        struct qede_dev *edev = netdev_priv(ndev);

        if (vlan > 4095) {
                DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
                return -EINVAL;
        }

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
                   vlan, vf);

        return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
        struct qede_dev *edev = netdev_priv(ndev);

        DP_VERBOSE(edev, QED_MSG_IOV,
                   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

        if (!is_valid_ether_addr(mac)) {
                DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
                return -EINVAL;
        }

        return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
        struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
        struct qed_dev_info *qed_info = &edev->dev_info.common;
        struct qed_update_vport_params *vport_params;
        int rc;

        vport_params = vzalloc(sizeof(*vport_params));
        if (!vport_params)
                return -ENOMEM;
        DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

        rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

        /* Enable/Disable Tx switching for PF */
        if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
            qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
                vport_params->vport_id = 0;
                vport_params->update_tx_switching_flg = 1;
                vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
                edev->ops->vport_update(edev->cdev, vport_params);
        }

        vfree(vport_params);
        return rc;
}
#endif
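
/* qede_sriov_configure() is reached through the standard PCI sysfs knob;
 * an illustrative invocation (device address hypothetical):
 *
 *      echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Writing 0 removes the VFs again, and the vport update above then turns
 * PF Tx switching back off.
 */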

static struct pci_driver qede_pci_driver = {
        .name = "qede",
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
        .shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
        .sriov_configure = qede_sriov_configure,
#endif
};

static struct qed_eth_cb_ops qede_ll_ops = {
        {
#ifdef CONFIG_RFS_ACCEL
                .arfs_filter_op = qede_arfs_filter_op,
#endif
                .link_update = qede_link_update,
        },
        .force_mac = qede_force_mac,
        .ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct ethtool_drvinfo drvinfo;
        struct qede_dev *edev;

        if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
                goto done;

        /* Check whether this is a qede device */
        if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
                goto done;

        memset(&drvinfo, 0, sizeof(drvinfo));
        ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
        if (strcmp(drvinfo.driver, "qede"))
                goto done;
        edev = netdev_priv(ndev);

        switch (event) {
        case NETDEV_CHANGENAME:
                /* Notify qed of the name change */
                if (!edev->ops || !edev->ops->common)
                        goto done;
                edev->ops->common->set_name(edev->cdev, edev->ndev->name);
                break;
        case NETDEV_CHANGEADDR:
                qede_rdma_event_changeaddr(edev);
                break;
        }

done:
        return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
        .notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
        int ret;

        pr_info("qede_init: %s\n", version);

        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                pr_notice("Failed to get qed ethernet operations\n");
                return -EINVAL;
        }

        /* Must register notifier before pci ops, since we might miss
         * interface rename after pci probe and netdev registration.
         */
        ret = register_netdevice_notifier_rh(&qede_netdev_notifier);
        if (ret) {
                pr_notice("Failed to register netdevice_notifier\n");
                qed_put_eth_ops();
                return -EINVAL;
        }

        ret = pci_register_driver(&qede_pci_driver);
        if (ret) {
                pr_notice("Failed to register driver\n");
                unregister_netdevice_notifier_rh(&qede_netdev_notifier);
                qed_put_eth_ops();
                return -EINVAL;
        }

        return 0;
}

static void __exit qede_cleanup(void)
{
        if (debug & QED_LOG_INFO_MASK)
                pr_info("qede_cleanup called\n");

        unregister_netdevice_notifier_rh(&qede_netdev_notifier);
        pci_unregister_driver(&qede_pci_driver);
        qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
        struct qede_stats_common *p_common = &edev->stats.common;
        struct qed_eth_stats stats;

        edev->ops->get_vport_stats(edev->cdev, &stats);

        p_common->no_buff_discards = stats.common.no_buff_discards;
        p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
        p_common->ttl0_discard = stats.common.ttl0_discard;
        p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
        p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
        p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
        p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
        p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
        p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
        p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
        p_common->mac_filter_discards = stats.common.mac_filter_discards;

        p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
        p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
        p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
        p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
        p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
        p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
        p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
        p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
        p_common->coalesced_events = stats.common.tpa_coalesced_events;
        p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
        p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
        p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

        p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
        p_common->rx_65_to_127_byte_packets =
            stats.common.rx_65_to_127_byte_packets;
        p_common->rx_128_to_255_byte_packets =
            stats.common.rx_128_to_255_byte_packets;
        p_common->rx_256_to_511_byte_packets =
            stats.common.rx_256_to_511_byte_packets;
        p_common->rx_512_to_1023_byte_packets =
            stats.common.rx_512_to_1023_byte_packets;
        p_common->rx_1024_to_1518_byte_packets =
            stats.common.rx_1024_to_1518_byte_packets;
        p_common->rx_crc_errors = stats.common.rx_crc_errors;
        p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
        p_common->rx_pause_frames = stats.common.rx_pause_frames;
        p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
        p_common->rx_align_errors = stats.common.rx_align_errors;
        p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
        p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
        p_common->rx_jabbers = stats.common.rx_jabbers;
        p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
        p_common->rx_fragments = stats.common.rx_fragments;
        p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
        p_common->tx_65_to_127_byte_packets =
            stats.common.tx_65_to_127_byte_packets;
        p_common->tx_128_to_255_byte_packets =
            stats.common.tx_128_to_255_byte_packets;
        p_common->tx_256_to_511_byte_packets =
            stats.common.tx_256_to_511_byte_packets;
        p_common->tx_512_to_1023_byte_packets =
            stats.common.tx_512_to_1023_byte_packets;
        p_common->tx_1024_to_1518_byte_packets =
            stats.common.tx_1024_to_1518_byte_packets;
        p_common->tx_pause_frames = stats.common.tx_pause_frames;
        p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
        p_common->brb_truncates = stats.common.brb_truncates;
        p_common->brb_discards = stats.common.brb_discards;
        p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;

        if (QEDE_IS_BB(edev)) {
                struct qede_stats_bb *p_bb = &edev->stats.bb;

                p_bb->rx_1519_to_1522_byte_packets =
                    stats.bb.rx_1519_to_1522_byte_packets;
                p_bb->rx_1519_to_2047_byte_packets =
                    stats.bb.rx_1519_to_2047_byte_packets;
                p_bb->rx_2048_to_4095_byte_packets =
                    stats.bb.rx_2048_to_4095_byte_packets;
                p_bb->rx_4096_to_9216_byte_packets =
                    stats.bb.rx_4096_to_9216_byte_packets;
                p_bb->rx_9217_to_16383_byte_packets =
                    stats.bb.rx_9217_to_16383_byte_packets;
                p_bb->tx_1519_to_2047_byte_packets =
                    stats.bb.tx_1519_to_2047_byte_packets;
                p_bb->tx_2048_to_4095_byte_packets =
                    stats.bb.tx_2048_to_4095_byte_packets;
                p_bb->tx_4096_to_9216_byte_packets =
                    stats.bb.tx_4096_to_9216_byte_packets;
                p_bb->tx_9217_to_16383_byte_packets =
                    stats.bb.tx_9217_to_16383_byte_packets;
                p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
                p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
        } else {
                struct qede_stats_ah *p_ah = &edev->stats.ah;

                p_ah->rx_1519_to_max_byte_packets =
                    stats.ah.rx_1519_to_max_byte_packets;
                p_ah->tx_1519_to_max_byte_packets =
                    stats.ah.tx_1519_to_max_byte_packets;
        }
}

static void qede_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_stats_common *p_common;

        qede_fill_by_demand_stats(edev);
        p_common = &edev->stats.common;

        stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                            p_common->rx_bcast_pkts;
        stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
                            p_common->tx_bcast_pkts;

        stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
                          p_common->rx_bcast_bytes;
        stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
                          p_common->tx_bcast_bytes;

        stats->tx_errors = p_common->tx_err_drop_pkts;
        stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

        stats->rx_fifo_errors = p_common->no_buff_discards;

        if (QEDE_IS_BB(edev))
                stats->collisions = edev->stats.bb.tx_total_collisions;
        stats->rx_crc_errors = p_common->rx_crc_errors;
        stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
                              struct ifla_vf_info *ivi)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
                            int min_tx_rate, int max_tx_rate)
{
        struct qede_dev *edev = netdev_priv(dev);

        return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
                                        max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
                                  int link_state)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return qede_ptp_hw_ts(edev, ifr);
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "default IOCTL cmd 0x%x\n", cmd);
                return -EOPNOTSUPP;
        }

        return 0;
}
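
/* SIOCSHWTSTAMP is the only command handled above; it arrives through the
 * generic kernel hwtstamp interface. A minimal userspace sketch (assumed
 * example, not part of this driver; "eth0" and sock_fd are placeholders):
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr = { 0 };
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */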

static const struct net_device_ops qede_netdev_ops = {
        .ndo_size = sizeof(struct net_device_ops),
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu_rh74 = qede_change_mtu,
        .ndo_do_ioctl = qede_ioctl,
#ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_mac = qede_set_vf_mac,
        .extended.ndo_set_vf_vlan = qede_set_vf_vlan,
        .extended.ndo_set_vf_trust = qede_set_vf_trust,
#endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_link_state = qede_set_vf_link_state,
        .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
        .ndo_get_vf_config = qede_get_vf_config,
        .ndo_set_vf_rate = qede_set_vf_rate,
#endif
        .extended.ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .extended.ndo_udp_tunnel_del = qede_udp_tunnel_del,
        .ndo_features_check = qede_features_check,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
};

static const struct net_device_ops qede_netdev_vf_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu_rh74 = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_features_check = qede_features_check,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
                                            u32 dp_module, u8 dp_level)
{
        struct net_device *ndev;
        struct qede_dev *edev;

        ndev = alloc_etherdev_mqs(sizeof(*edev),
                                  info->num_queues, info->num_queues);
        if (!ndev) {
                pr_err("etherdev allocation failed\n");
                return NULL;
        }

        edev = netdev_priv(ndev);
        edev->ndev = ndev;
        edev->cdev = cdev;
        edev->pdev = pdev;
        edev->dp_module = dp_module;
        edev->dp_level = dp_level;
        edev->ops = qed_ops;
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

        DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
                info->num_queues, info->num_queues);

        SET_NETDEV_DEV(ndev, &pdev->dev);

        memset(&edev->stats, 0, sizeof(edev->stats));
        memcpy(&edev->dev_info, info, sizeof(*info));
        /* As ethtool doesn't have the ability to show WoL behavior as
         * 'default', if the device supports WoL, declare it enabled.
         */
        if (edev->dev_info.common.wol_support)
                edev->wol_enabled = true;

        INIT_LIST_HEAD(&edev->vlan_list);

        return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
        struct net_device *ndev = edev->ndev;
        struct pci_dev *pdev = edev->pdev;
        bool udp_tunnel_enable = false;
        netdev_features_t hw_features;

        pci_set_drvdata(pdev, ndev);

        ndev->mem_start = edev->dev_info.common.pci_mem_start;
        ndev->base_addr = ndev->mem_start;
        ndev->mem_end = edev->dev_info.common.pci_mem_end;
        ndev->irq = edev->dev_info.common.pci_irq;

        ndev->watchdog_timeo = TX_TIMEOUT;

        if (IS_VF(edev))
                ndev->netdev_ops = &qede_netdev_vf_ops;
        else
                ndev->netdev_ops = &qede_netdev_ops;

        qede_set_ethtool_ops(ndev);

        ndev->priv_flags |= IFF_UNICAST_FLT;

        /* user-changeable features */
        hw_features = NETIF_F_GRO | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;

        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
                hw_features |= NETIF_F_NTUPLE;

        if (edev->dev_info.common.vxlan_enable ||
            edev->dev_info.common.geneve_enable)
                udp_tunnel_enable = true;

        if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
                hw_features |= NETIF_F_TSO_ECN;
                ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                        NETIF_F_SG | NETIF_F_TSO |
                                        NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                                        NETIF_F_RXCSUM;
        }

        if (udp_tunnel_enable) {
                hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
                                NETIF_F_GSO_UDP_TUNNEL_CSUM);
                ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
                                          NETIF_F_GSO_UDP_TUNNEL_CSUM);
        }

        if (edev->dev_info.common.gre_enable) {
                hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
                ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
                                          NETIF_F_GSO_GRE_CSUM);
        }

        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
        ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

        ndev->hw_features = hw_features;

        /* MTU range: 46 - 9600 */
        ndev->extended->min_mtu = ETH_ZLEN - ETH_HLEN;
        ndev->extended->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
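        /* (ETH_ZLEN (60) minus ETH_HLEN (14) gives the 46-byte minimum
         * quoted above; QEDE_MAX_JUMBO_PACKET_SIZE supplies the 9600-byte
         * maximum.)
         */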

        /* Set network device HW MAC */
        ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

        ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts the 32b debug param into two params, level and
 * module. Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking a specific flow at a low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
        *p_dp_level = QED_LEVEL_NOTICE;
        *p_dp_module = 0;

        if (debug & QED_LOG_VERBOSE_MASK) {
                *p_dp_level = QED_LEVEL_VERBOSE;
                *p_dp_module = (debug & 0x3FFFFFFF);
        } else if (debug & QED_LOG_INFO_MASK) {
                *p_dp_level = QED_LEVEL_INFO;
        } else if (debug & QED_LOG_NOTICE_MASK) {
                *p_dp_level = QED_LEVEL_NOTICE;
        }
}
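
/* Worked examples (illustrative), following the bit layout documented
 * above:
 *
 *      debug=0x80000000  ->  dp_level = QED_LEVEL_NOTICE, dp_module = 0
 *      debug=0x40000000  ->  dp_level = QED_LEVEL_INFO,   dp_module = 0
 *      debug=0x00000005  ->  dp_level = QED_LEVEL_VERBOSE,
 *                            dp_module = 0x5 (VERBOSE for two modules)
 *
 * e.g. "modprobe qede debug=0x40000000" for INFO-level prints.
 */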

static void qede_free_fp_array(struct qede_dev *edev)
{
        if (edev->fp_array) {
                struct qede_fastpath *fp;
                int i;

                for_each_queue(i) {
                        fp = &edev->fp_array[i];

                        kfree(fp->sb_info);
                        kfree(fp->rxq);
                        kfree(fp->txq);
                }
                kfree(edev->fp_array);
        }

        edev->num_queues = 0;
        edev->fp_num_tx = 0;
        edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
        u8 fp_combined, fp_rx = edev->fp_num_rx;
        struct qede_fastpath *fp;
        int i;

        edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
                                 sizeof(*edev->fp_array), GFP_KERNEL);
        if (!edev->fp_array) {
                DP_NOTICE(edev, "fp array allocation failed\n");
                goto err;
        }

        fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

        /* Allocate the FP elements for Rx queues followed by combined and then
         * the Tx. This ordering should be maintained so that the respective
         * queues (Rx or Tx) will be together in the fastpath array and the
         * associated ids will be sequential.
         */
        for_each_queue(i) {
                fp = &edev->fp_array[i];

                fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
                if (!fp->sb_info) {
                        DP_NOTICE(edev, "sb info struct allocation failed\n");
                        goto err;
                }

                if (fp_rx) {
                        fp->type = QEDE_FASTPATH_RX;
                        fp_rx--;
                } else if (fp_combined) {
                        fp->type = QEDE_FASTPATH_COMBINED;
                        fp_combined--;
                } else {
                        fp->type = QEDE_FASTPATH_TX;
                }

                if (fp->type & QEDE_FASTPATH_TX) {
                        fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
                        if (!fp->txq)
                                goto err;
                }

                if (fp->type & QEDE_FASTPATH_RX) {
                        fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
                        if (!fp->rxq)
                                goto err;
                }
        }

        return 0;
err:
        qede_free_fp_array(edev);
        return -ENOMEM;
}
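
/* Example layout (illustrative): with fp_num_rx = 1, fp_num_tx = 1 and
 * QEDE_QUEUE_CNT() = 4, the loop above yields
 *
 *      fp_array[0] -> QEDE_FASTPATH_RX
 *      fp_array[1] -> QEDE_FASTPATH_COMBINED
 *      fp_array[2] -> QEDE_FASTPATH_COMBINED
 *      fp_array[3] -> QEDE_FASTPATH_TX
 *
 * so Rx-capable and Tx-capable entries stay contiguous with sequential
 * ids, as the comment in the loop requires.
 */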

static void qede_sp_task(struct work_struct *work)
{
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);

        __qede_lock(edev);

        if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
                if (edev->state == QEDE_STATE_OPEN)
                        qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
        if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
                if (edev->state == QEDE_STATE_OPEN)
                        qede_process_arfs_filters(edev, false);
        }
#endif
        __qede_unlock(edev);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
        struct qed_pf_params pf_params;

        /* 64 rx + 64 tx */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
        pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 2;
        pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
        qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE    80

static void qede_log_probe(struct qede_dev *edev)
{
        struct qed_dev_info *p_dev_info = &edev->dev_info.common;
        char buf[QEDE_FW_VER_STR_SIZE];
        size_t left_size;

        snprintf(buf, QEDE_FW_VER_STR_SIZE,
                 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
                 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
                 p_dev_info->fw_eng,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
                 QED_MFW_VERSION_3_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
                 QED_MFW_VERSION_2_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
                 QED_MFW_VERSION_1_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
                 QED_MFW_VERSION_0_OFFSET);

        left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
        if (p_dev_info->mbi_version && left_size)
                snprintf(buf + strlen(buf), left_size,
                         " [MBI %d.%d.%d]",
                         (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
                         QED_MBI_VERSION_2_OFFSET,
                         (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
                         QED_MBI_VERSION_1_OFFSET,
                         (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
                         QED_MBI_VERSION_0_OFFSET);

        pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
                PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
                buf, edev->ndev->name);
}

enum qede_probe_mode {
        QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        bool is_vf, enum qede_probe_mode mode)
{
        struct qed_probe_params probe_params;
        struct qed_slowpath_params sp_params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
        struct qed_dev *cdev;
        int rc;

        if (unlikely(dp_level & QED_LEVEL_INFO))
                pr_notice("Starting qede probe\n");

        memset(&probe_params, 0, sizeof(probe_params));
        probe_params.protocol = QED_PROTOCOL_ETH;
        probe_params.dp_module = dp_module;
        probe_params.dp_level = dp_level;
        probe_params.is_vf = is_vf;
        cdev = qed_ops->common->probe(pdev, &probe_params);
        if (!cdev) {
                rc = -ENODEV;
                goto err0;
        }

        qede_update_pf_params(cdev);

        /* Start the Slowpath-process */
        memset(&sp_params, 0, sizeof(sp_params));
        sp_params.int_mode = QED_INT_MODE_MSIX;
        sp_params.drv_major = QEDE_MAJOR_VERSION;
        sp_params.drv_minor = QEDE_MINOR_VERSION;
        sp_params.drv_rev = QEDE_REVISION_VERSION;
        sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
        strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
        rc = qed_ops->common->slowpath_start(cdev, &sp_params);
        if (rc) {
                pr_notice("Cannot start slowpath\n");
                goto err1;
        }

        /* Learn information crucial for qede to progress */
        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto err2;

        edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
                                   dp_level);
        if (!edev) {
                rc = -ENOMEM;
                goto err2;
        }

        if (is_vf)
                edev->flags |= QEDE_FLAG_IS_VF;

        qede_init_ndev(edev);

        rc = qede_rdma_dev_add(edev);
        if (rc)
                goto err3;

        /* Prepare the lock prior to the registration of the netdev,
         * as once it's registered we might reach flows requiring it
         * [it's even possible to reach a flow needing it directly
         * from there, although it's unlikely].
         */
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
        mutex_init(&edev->qede_lock);
        rc = register_netdev(edev->ndev);
        if (rc) {
                DP_NOTICE(edev, "Cannot register net-device\n");
                goto err4;
        }

        edev->ops->common->set_name(cdev, edev->ndev->name);

        /* PTP not supported on VFs */
        if (!is_vf)
                qede_ptp_enable(edev, true);

        edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
        if (!IS_VF(edev))
                qede_set_dcbnl_ops(edev->ndev);
#endif

        edev->rx_copybreak = QEDE_RX_HDR_SIZE;

        qede_log_probe(edev);
        return 0;

err4:
        qede_rdma_dev_remove(edev);
err3:
        free_netdev(edev->ndev);
err2:
        qed_ops->common->slowpath_stop(cdev);
err1:
        qed_ops->common->remove(cdev);
err0:
        return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        bool is_vf = false;
        u32 dp_module = 0;
        u8 dp_level = 0;

        switch ((enum qede_pci_private)id->driver_data) {
        case QEDE_PRIVATE_VF:
                if (debug & QED_LOG_VERBOSE_MASK)
                        dev_err(&pdev->dev, "Probing a VF\n");
                is_vf = true;
                break;
        default:
                if (debug & QED_LOG_VERBOSE_MASK)
                        dev_err(&pdev->dev, "Probing a PF\n");
        }

        qede_config_debug(debug, &dp_module, &dp_level);

        return __qede_probe(pdev, dp_module, dp_level, is_vf,
                            QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
        QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_dev *cdev = edev->cdev;

        DP_INFO(edev, "Starting qede_remove\n");

        unregister_netdev(ndev);
        cancel_delayed_work_sync(&edev->sp_task);

        qede_ptp_disable(edev);

        qede_rdma_dev_remove(edev);

        edev->ops->common->set_power_state(cdev, PCI_D0);

        pci_set_drvdata(pdev, NULL);

        /* Use global ops since we've freed edev */
        qed_ops->common->slowpath_stop(cdev);
        if (system_state == SYSTEM_POWER_OFF)
                return;
        qed_ops->common->remove(cdev);

        /* Since this can happen out-of-sync with other flows,
         * don't release the netdevice until after slowpath stop
         * has been called to guarantee various other contexts
         * [e.g., QED register callbacks] won't break anything when
         * accessing the netdevice.
         */
        free_netdev(ndev);

        dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
        int rc;
        u16 rss_num;

        /* Set up queues according to available resources */
        if (edev->req_queues)
                rss_num = edev->req_queues;
        else
                rss_num = netif_get_num_default_rss_queues() *
                          edev->dev_info.common.num_hwfns;

        rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

        rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
        if (rc > 0) {
                /* Managed to request interrupts for our queues */
                edev->num_queues = rc;
                DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
                        QEDE_QUEUE_CNT(edev), rss_num);
                rc = 0;
        }

        edev->fp_num_tx = edev->req_num_tx;
        edev->fp_num_rx = edev->req_num_rx;

        return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
                             u16 sb_id)
{
        if (sb_info->sb_virt) {
                edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
                dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
                                  (void *)sb_info->sb_virt, sb_info->sb_phys);
                memset(sb_info, 0, sizeof(*sb_info));
        }
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = dma_alloc_coherent(&edev->pdev->dev,
                                     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
        }

        rc = edev->ops->common->sb_init(edev->cdev, sb_info,
                                        sb_virt, sb_phys, sb_id,
                                        QED_SB_TYPE_L2_QUEUE);
        if (rc) {
                DP_ERR(edev, "Status block initialization failed\n");
                dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
                                  sb_virt, sb_phys);
                return rc;
        }

        return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
                                 struct qede_rx_queue *rxq)
{
        u16 i;

        for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
                struct sw_rx_data *rx_buf;
                struct page *data;

                rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
                data = rx_buf->data;

                dma_unmap_page(&edev->pdev->dev,
                               rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);

                rx_buf->data = NULL;
                __free_page(data);
        }
}

static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        int i;

        if (edev->gro_disable)
                return;

        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->buffer;

                if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
                                       replace_buf->mapping,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
                        __free_page(replace_buf->data);
                }
        }
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        qede_free_sge_mem(edev, rxq);

        /* Free rx buffers */
        qede_free_rx_buffers(edev, rxq);

        /* Free the parallel SW ring */
        kfree(rxq->sw_rx_ring);

        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        dma_addr_t mapping;
        int i;

        if (edev->gro_disable)
                return 0;

        if (edev->ndev->mtu > PAGE_SIZE) {
                edev->gro_disable = 1;
                return 0;
        }

        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->buffer;

                replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
                if (unlikely(!replace_buf->data)) {
                        DP_NOTICE(edev,
                                  "Failed to allocate TPA skb pool [replacement buffer]\n");
                        goto err;
                }

                mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                        DP_NOTICE(edev,
                                  "Failed to map TPA replacement buffer\n");
                        goto err;
                }

                replace_buf->mapping = mapping;
                tpa_info->buffer.page_offset = 0;
                tpa_info->buffer_mapping = mapping;
                tpa_info->state = QEDE_AGG_STATE_NONE;
        }

        return 0;
err:
        qede_free_sge_mem(edev, rxq);
        edev->gro_disable = 1;
        return -ENOMEM;
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        int i, rc, size;

        rxq->num_rx_buffers = edev->q_num_rx_buffers;

        rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

        /* Make sure that the headroom and payload fit in a single page */
        if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
                rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;

        /* Segment size to split a page into multiple equal parts */
        rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
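        /* e.g. (illustrative): a 1500-byte MTU gives an rx_buf_size a
         * little over 1518 bytes, which rounds up to a 2048-byte segment,
         * i.e. two buffers per 4K page.
         */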

        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
        rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Rx buffers ring allocation failed\n");
                rc = -ENOMEM;
                goto err;
        }

        /* Allocate FW Rx ring */
        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            QED_CHAIN_MODE_NEXT_PTR,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            RX_RING_SIZE,
                                            sizeof(struct eth_rx_bd),
                                            &rxq->rx_bd_ring, NULL);
        if (rc)
                goto err;

        /* Allocate FW completion ring */
        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME,
                                            QED_CHAIN_MODE_PBL,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            RX_RING_SIZE,
                                            sizeof(union eth_rx_cqe),
                                            &rxq->rx_comp_ring, NULL);
        if (rc)
                goto err;

        /* Allocate buffers for the Rx ring */
        rxq->filled_buffers = 0;
        for (i = 0; i < rxq->num_rx_buffers; i++) {
                rc = qede_alloc_rx_buffer(rxq, false);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
                        goto err;
                }
        }

        rc = qede_alloc_sge_mem(edev, rxq);
err:
        return rc;
}
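
/* Rx queue memory at a glance (summary of the allocations above):
 *
 *      sw_rx_ring   - driver-side page bookkeeping, RX_RING_SIZE entries
 *      rx_bd_ring   - FW buffer-descriptor chain (produce/consume,
 *                     next-pointer mode)
 *      rx_comp_ring - FW completion chain (consume only, PBL mode)
 *      tpa_info     - GRO/TPA replacement buffers, unless GRO is disabled
 */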

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
        /* Free the parallel SW ring */
        kfree(txq->sw_tx_ring);

        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
        int size, rc;
        union eth_tx_bd_types *p_virt;

        txq->num_tx_buffers = edev->q_num_tx_buffers;

        /* Allocate the parallel driver ring for Tx buffers */
        size = sizeof(*txq->sw_tx_ring) * txq->num_tx_buffers;
        txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
        if (!txq->sw_tx_ring) {
                DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
                goto err;
        }

        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            QED_CHAIN_MODE_PBL,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            txq->num_tx_buffers,
                                            sizeof(*p_virt),
                                            &txq->tx_pbl, NULL);
        if (rc)
                goto err;

        return 0;

err:
        qede_free_mem_txq(edev, txq);
        return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
        qede_free_mem_sb(edev, fp->sb_info, fp->id);

        if (fp->type & QEDE_FASTPATH_RX)
                qede_free_mem_rxq(edev, fp->rxq);

        if (fp->type & QEDE_FASTPATH_TX)
                qede_free_mem_txq(edev, fp->txq);
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one rx queue and/or multiple per-TC tx
 * queues).
 */
1349static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1350{
1351        int rc;
1352
1353        rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1354        if (rc)
1355                goto err;
1356
1357        if (fp->type & QEDE_FASTPATH_RX) {
1358                rc = qede_alloc_mem_rxq(edev, fp->rxq);
1359                if (rc)
1360                        goto err;
1361        }
1362
1363        if (fp->type & QEDE_FASTPATH_TX) {
1364                rc = qede_alloc_mem_txq(edev, fp->txq);
1365                if (rc)
1366                        goto err;
1367        }
1368
1369        return 0;
1370err:
1371        return rc;
1372}
1373
1374static void qede_free_mem_load(struct qede_dev *edev)
1375{
1376        int i;
1377
1378        for_each_queue(i) {
1379                struct qede_fastpath *fp = &edev->fp_array[i];
1380
1381                qede_free_mem_fp(edev, fp);
1382        }
1383}
1384
1385/* This function allocates all qede memory at NIC load. */
1386static int qede_alloc_mem_load(struct qede_dev *edev)
1387{
1388        int rc = 0, queue_id;
1389
1390        for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1391                struct qede_fastpath *fp = &edev->fp_array[queue_id];
1392
1393                rc = qede_alloc_mem_fp(edev, fp);
1394                if (rc) {
1395                        DP_ERR(edev,
1396                               "Failed to allocate memory for fastpath - rss id = %d\n",
1397                               queue_id);
1398                        qede_free_mem_load(edev);
1399                        return rc;
1400                }
1401        }
1402
1403        return 0;
1404}
1405
1406/* This function inits fp content and resets the SB, RXQ and TXQ structures */
1407static void qede_init_fp(struct qede_dev *edev)
1408{
1409        int queue_id, rxq_index = 0, txq_index = 0;
1410        struct qede_fastpath *fp;
1411
1412        for_each_queue(queue_id) {
1413                fp = &edev->fp_array[queue_id];
1414
1415                fp->edev = edev;
1416                fp->id = queue_id;
1417
1418
1419                if (fp->type & QEDE_FASTPATH_RX) {
1420                        fp->rxq->rxq_id = rxq_index++;
1421                        fp->rxq->dev = &edev->pdev->dev;
1422                }
1423
1424                if (fp->type & QEDE_FASTPATH_TX) {
1425                        fp->txq->index = txq_index++;
1426                        if (edev->dev_info.is_legacy)
1427                                fp->txq->is_legacy = 1;
1428                        fp->txq->dev = &edev->pdev->dev;
1429                }
1430
1431                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1432                         edev->ndev->name, queue_id);
1433        }
1434
1435        edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
1436}
1437
1438static int qede_set_real_num_queues(struct qede_dev *edev)
1439{
1440        int rc = 0;
1441
1442        rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1443        if (rc) {
1444                DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1445                return rc;
1446        }
1447
1448        rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1449        if (rc) {
1450                DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1451                return rc;
1452        }
1453
1454        return 0;
1455}
1456
1457static void qede_napi_disable_remove(struct qede_dev *edev)
1458{
1459        int i;
1460
1461        for_each_queue(i) {
1462                napi_disable(&edev->fp_array[i].napi);
1463
1464                netif_napi_del(&edev->fp_array[i].napi);
1465        }
1466}
1467
1468static void qede_napi_add_enable(struct qede_dev *edev)
1469{
1470        int i;
1471
1472        /* Add NAPI objects */
1473        for_each_queue(i) {
1474                netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1475                               qede_poll, NAPI_POLL_WEIGHT);
1476                napi_enable(&edev->fp_array[i].napi);
1477        }
1478}
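
/* Illustrative note (not in the original source): add/enable here and
 * disable/del in qede_napi_disable_remove() above are deliberately
 * symmetric:
 *
 *	netif_napi_add(ndev, napi, qede_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(napi);
 *	...
 *	napi_disable(napi);
 *	netif_napi_del(napi);
 */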
1479
1480static void qede_sync_free_irqs(struct qede_dev *edev)
1481{
1482        int i;
1483
1484        for (i = 0; i < edev->int_info.used_cnt; i++) {
1485                if (edev->int_info.msix_cnt) {
1486                        synchronize_irq(edev->int_info.msix[i].vector);
1487                        free_irq(edev->int_info.msix[i].vector,
1488                                 &edev->fp_array[i]);
1489                } else {
1490                        edev->ops->common->simd_handler_clean(edev->cdev, i);
1491                }
1492        }
1493
1494        edev->int_info.used_cnt = 0;
1495}
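
/* Illustrative note (not in the original source): synchronize_irq() makes
 * sure any handler still running on that vector completes before
 * free_irq() releases it, so once used_cnt is reset no fastpath ISR can be
 * touching the &edev->fp_array[i] cookies handed out at request time.
 */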
1496
1497static int qede_req_msix_irqs(struct qede_dev *edev)
1498{
1499        int i, rc;
1500
1501        /* Verify we have an interrupt vector for every prepared RSS queue */
1502        if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1503                DP_ERR(edev,
1504                       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1505                       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1506                return -EINVAL;
1507        }
1508
1509        for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1510#ifdef CONFIG_RFS_ACCEL
1511                struct qede_fastpath *fp = &edev->fp_array[i];
1512
1513                if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1514                        rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1515                                              edev->int_info.msix[i].vector);
1516                        if (rc) {
1517                                DP_ERR(edev, "Failed to add CPU rmap\n");
1518                                qede_free_arfs(edev);
1519                        }
1520                }
1521#endif
1522                rc = request_irq(edev->int_info.msix[i].vector,
1523                                 qede_msix_fp_int, 0, edev->fp_array[i].name,
1524                                 &edev->fp_array[i]);
1525                if (rc) {
1526                        DP_ERR(edev, "Request fp %d irq failed\n", i);
1527                        qede_sync_free_irqs(edev);
1528                        return rc;
1529                }
1530                DP_VERBOSE(edev, NETIF_MSG_INTR,
1531                           "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1532                           edev->fp_array[i].name, i,
1533                           &edev->fp_array[i]);
1534                edev->int_info.used_cnt++;
1535        }
1536
1537        return 0;
1538}
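
/* Illustrative sketch (not in the original source; the handler body is an
 * assumption, consistent with qede_simd_fp_handler() below): because each
 * vector's cookie is its fastpath, the MSI-X handler registered above only
 * needs to kick NAPI:
 *
 *	static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
 *	{
 *		struct qede_fastpath *fp = fp_cookie;
 *
 *		napi_schedule_irqoff(&fp->napi);
 *		return IRQ_HANDLED;
 *	}
 */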
1539
1540static void qede_simd_fp_handler(void *cookie)
1541{
1542        struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1543
1544        napi_schedule_irqoff(&fp->napi);
1545}
1546
1547static int qede_setup_irqs(struct qede_dev *edev)
1548{
1549        int i, rc = 0;
1550
1551        /* Learn Interrupt configuration */
1552        rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1553        if (rc)
1554                return rc;
1555
1556        if (edev->int_info.msix_cnt) {
1557                rc = qede_req_msix_irqs(edev);
1558                if (rc)
1559                        return rc;
1560                edev->ndev->irq = edev->int_info.msix[0].vector;
1561        } else {
1562                const struct qed_common_ops *ops;
1563
1564                /* qed should receive the RSS ids and callbacks */
1565                ops = edev->ops->common;
1566                for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1567                        ops->simd_handler_config(edev->cdev,
1568                                                 &edev->fp_array[i], i,
1569                                                 qede_simd_fp_handler);
1570                edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1571        }
1572        return 0;
1573}
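
/* Illustrative note (not in the original source): qede therefore runs in
 * one of two interrupt modes - dedicated per-queue MSI-X vectors requested
 * via qede_req_msix_irqs(), or, when msix_cnt is 0, "simd" mode where qed
 * owns the device interrupt and fans completions out to
 * qede_simd_fp_handler() per fastpath.
 */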
1574
1575static int qede_drain_txq(struct qede_dev *edev,
1576                          struct qede_tx_queue *txq, bool allow_drain)
1577{
1578        int rc, cnt = 1000;
1579
1580        while (txq->sw_tx_cons != txq->sw_tx_prod) {
1581                if (!cnt) {
1582                        if (allow_drain) {
1583                                DP_NOTICE(edev,
1584                                          "Tx queue[%d] is stuck, requesting MCP to drain\n",
1585                                          txq->index);
1586                                rc = edev->ops->common->drain(edev->cdev);
1587                                if (rc)
1588                                        return rc;
1589                                return qede_drain_txq(edev, txq, false);
1590                        }
1591                        DP_NOTICE(edev,
1592                                  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1593                                  txq->index, txq->sw_tx_prod,
1594                                  txq->sw_tx_cons);
1595                        return -ENODEV;
1596                }
1597                cnt--;
1598                usleep_range(1000, 2000);
1599                barrier();
1600        }
1601
1602        /* FW finished processing, wait for HW to transmit all tx packets */
1603        usleep_range(1000, 2000);
1604
1605        return 0;
1606}
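
/* Illustrative sketch (not in the original source): callers such as
 * qede_stop_queues() below allow one MCP-assisted retry; the recursive
 * call with allow_drain == false bounds the wait to two passes of the
 * 1000-iteration loop (roughly 2-4 seconds worst case):
 *
 *	rc = qede_drain_txq(edev, fp->txq, true);
 *	if (rc)
 *		return rc;
 */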
1607
1608static int qede_stop_txq(struct qede_dev *edev,
1609                         struct qede_tx_queue *txq, int rss_id)
1610{
1611        return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1612}
1613
1614static int qede_stop_queues(struct qede_dev *edev)
1615{
1616        struct qed_update_vport_params *vport_update_params;
1617        struct qed_dev *cdev = edev->cdev;
1618        struct qede_fastpath *fp;
1619        int rc, i;
1620
1621        /* Disable the vport */
1622        vport_update_params = vzalloc(sizeof(*vport_update_params));
1623        if (!vport_update_params)
1624                return -ENOMEM;
1625
1626        vport_update_params->vport_id = 0;
1627        vport_update_params->update_vport_active_flg = 1;
1628        vport_update_params->vport_active_flg = 0;
1629        vport_update_params->update_rss_flg = 0;
1630
1631        rc = edev->ops->vport_update(cdev, vport_update_params);
1632        vfree(vport_update_params);
1633
1634        if (rc) {
1635                DP_ERR(edev, "Failed to update vport\n");
1636                return rc;
1637        }
1638
1639        /* Flush Tx queues. If needed, request drain from MCP */
1640        for_each_queue(i) {
1641                fp = &edev->fp_array[i];
1642
1643                if (fp->type & QEDE_FASTPATH_TX) {
1644                        rc = qede_drain_txq(edev, fp->txq, true);
1645                        if (rc)
1646                                return rc;
1647                }
1648        }
1649
1650        /* Stop all Queues in reverse order */
1651        for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1652                fp = &edev->fp_array[i];
1653
1654                /* Stop the Tx Queue(s) */
1655                if (fp->type & QEDE_FASTPATH_TX) {
1656                        rc = qede_stop_txq(edev, fp->txq, i);
1657                        if (rc)
1658                                return rc;
1659                }
1660
1661                /* Stop the Rx Queue */
1662                if (fp->type & QEDE_FASTPATH_RX) {
1663                        rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1664                        if (rc) {
1665                                DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1666                                return rc;
1667                        }
1668                }
1669        }
1670
1671        /* Stop the vport */
1672        rc = edev->ops->vport_stop(cdev, 0);
1673        if (rc)
1674                DP_ERR(edev, "Failed to stop VPORT\n");
1675
1676        return rc;
1677}
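
/* Illustrative note (not in the original source): the teardown order above
 * is deliberate - deactivate the vport so no new traffic is classified to
 * the queues, flush Tx, stop the queues in reverse creation order, and
 * only then destroy the vport they were attached to.
 */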
1678
1679static int qede_start_txq(struct qede_dev *edev,
1680                          struct qede_fastpath *fp,
1681                          struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1682{
1683        dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1684        u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1685        struct qed_queue_start_common_params params;
1686        struct qed_txq_start_ret_params ret_params;
1687        int rc;
1688
1689        memset(&params, 0, sizeof(params));
1690        memset(&ret_params, 0, sizeof(ret_params));
1691
1692        params.queue_id = txq->index;
1693        params.p_sb = fp->sb_info;
1694        params.sb_idx = sb_idx;
1695
1696        rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1697                                   page_cnt, &ret_params);
1698        if (rc) {
1699                DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1700                return rc;
1701        }
1702
1703        txq->doorbell_addr = ret_params.p_doorbell;
1704        txq->handle = ret_params.p_handle;
1705
1706        /* Determine the associated FW consumer address */
1707        txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1708
1709        /* Prepare the doorbell parameters */
1710        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1711        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1712        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1713                  DQ_XCM_ETH_TX_BD_PROD_CMD);
1714        txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1715
1716        return rc;
1717}
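
/* Illustrative sketch (not in the original source; it mirrors the Tx
 * fastpath convention rather than quoting it): with tx_db prepared above,
 * ringing the doorbell later reduces to updating the producer and one
 * register write:
 *
 *	txq->tx_db.data.bd_prod =
 *		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 *	wmb();
 *	writel(txq->tx_db.raw, txq->doorbell_addr);
 */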
1718
1719static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1720{
1721        int vlan_removal_en = 1;
1722        struct qed_dev *cdev = edev->cdev;
1723        struct qed_dev_info *qed_info = &edev->dev_info.common;
1724        struct qed_update_vport_params *vport_update_params;
1725        struct qed_queue_start_common_params q_params;
1726        struct qed_start_vport_params start = {0};
1727        int rc, i;
1728
1729        if (!edev->num_queues) {
1730                DP_ERR(edev,
1731                       "Cannot update V-VPORT as active as there are no Rx queues\n");
1732                return -EINVAL;
1733        }
1734
1735        vport_update_params = vzalloc(sizeof(*vport_update_params));
1736        if (!vport_update_params)
1737                return -ENOMEM;
1738
1739        start.handle_ptp_pkts = !!(edev->ptp);
1740        start.gro_enable = !edev->gro_disable;
1741        start.mtu = edev->ndev->mtu;
1742        start.vport_id = 0;
1743        start.drop_ttl0 = true;
1744        start.remove_inner_vlan = vlan_removal_en;
1745        start.clear_stats = clear_stats;
1746
1747        rc = edev->ops->vport_start(cdev, &start);
1748
1749        if (rc) {
1750                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1751                goto out;
1752        }
1753
1754        DP_VERBOSE(edev, NETIF_MSG_IFUP,
1755                   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1756                   start.vport_id, edev->ndev->mtu + 0xe /* ETH_HLEN */, vlan_removal_en);
1757
1758        for_each_queue(i) {
1759                struct qede_fastpath *fp = &edev->fp_array[i];
1760                dma_addr_t p_phys_table;
1761                u32 page_cnt;
1762
1763                if (fp->type & QEDE_FASTPATH_RX) {
1764                        struct qed_rxq_start_ret_params ret_params;
1765                        struct qede_rx_queue *rxq = fp->rxq;
1766                        __le16 *val;
1767
1768                        memset(&ret_params, 0, sizeof(ret_params));
1769                        memset(&q_params, 0, sizeof(q_params));
1770                        q_params.queue_id = rxq->rxq_id;
1771                        q_params.vport_id = 0;
1772                        q_params.p_sb = fp->sb_info;
1773                        q_params.sb_idx = RX_PI;
1774
1775                        p_phys_table =
1776                            qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1777                        page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1778
1779                        rc = edev->ops->q_rx_start(cdev, i, &q_params,
1780                                                   rxq->rx_buf_size,
1781                                                   rxq->rx_bd_ring.p_phys_addr,
1782                                                   p_phys_table,
1783                                                   page_cnt, &ret_params);
1784                        if (rc) {
1785                                DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1786                                       rc);
1787                                goto out;
1788                        }
1789
1790                        /* Use the return parameters */
1791                        rxq->hw_rxq_prod_addr = ret_params.p_prod;
1792                        rxq->handle = ret_params.p_handle;
1793
1794                        val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1795                        rxq->hw_cons_ptr = val;
1796
1797                        qede_update_rx_prod(edev, rxq);
1798                }
1799
1800                if (fp->type & QEDE_FASTPATH_TX) {
1801                        rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1802                        if (rc)
1803                                goto out;
1804                }
1805        }
1806
1807        /* Prepare and send the vport enable */
1808        vport_update_params->vport_id = start.vport_id;
1809        vport_update_params->update_vport_active_flg = 1;
1810        vport_update_params->vport_active_flg = 1;
1811
1812        if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
1813            qed_info->tx_switching) {
1814                vport_update_params->update_tx_switching_flg = 1;
1815                vport_update_params->tx_switching_flg = 1;
1816        }
1817
1818        qede_fill_rss_params(edev, &vport_update_params->rss_params,
1819                             &vport_update_params->update_rss_flg);
1820
1821        rc = edev->ops->vport_update(cdev, vport_update_params);
1822        if (rc)
1823                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1824
1825out:
1826        vfree(vport_update_params);
1827        return rc;
1828}
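
/* Illustrative note (not in the original source): bring-up mirrors
 * qede_stop_queues() - start the vport first, attach every Rx/Tx queue to
 * it, then send a single vport-update that activates the vport and
 * programs RSS (and, where applicable, Tx switching) in one ramrod.
 */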
1829
1831enum qede_unload_mode {
1832        QEDE_UNLOAD_NORMAL,
1833};
1834
1835static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1836                        bool is_locked)
1837{
1838        struct qed_link_params link_params;
1839        int rc;
1840
1841        DP_INFO(edev, "Starting qede unload\n");
1842
1843        if (!is_locked)
1844                __qede_lock(edev);
1845
1846        edev->state = QEDE_STATE_CLOSED;
1847
1848        qede_rdma_dev_event_close(edev);
1849
1850        /* Close OS Tx */
1851        netif_tx_disable(edev->ndev);
1852        netif_carrier_off(edev->ndev);
1853
1854        /* Reset the link */
1855        memset(&link_params, 0, sizeof(link_params));
1856        link_params.link_up = false;
1857        edev->ops->common->set_link(edev->cdev, &link_params);
1858        rc = qede_stop_queues(edev);
1859        if (rc) {
1860                qede_sync_free_irqs(edev);
1861                goto out;
1862        }
1863
1864        DP_INFO(edev, "Stopped Queues\n");
1865
1866        qede_vlan_mark_nonconfigured(edev);
1867        edev->ops->fastpath_stop(edev->cdev);
1868
1869        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1870                qede_poll_for_freeing_arfs_filters(edev);
1871                qede_free_arfs(edev);
1872        }
1873
1874        /* Release the interrupts */
1875        qede_sync_free_irqs(edev);
1876        edev->ops->common->set_fp_int(edev->cdev, 0);
1877
1878        qede_napi_disable_remove(edev);
1879
1880        qede_free_mem_load(edev);
1881        qede_free_fp_array(edev);
1882
1883out:
1884        if (!is_locked)
1885                __qede_unlock(edev);
1886        DP_INFO(edev, "Ending qede unload\n");
1887}
1888
1889enum qede_load_mode {
1890        QEDE_LOAD_NORMAL,
1891        QEDE_LOAD_RELOAD,
1892};
1893
1894static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
1895                     bool is_locked)
1896{
1897        struct qed_link_params link_params;
1898        int rc;
1899
1900        DP_INFO(edev, "Starting qede load\n");
1901
1902        if (!is_locked)
1903                __qede_lock(edev);
1904
1905        rc = qede_set_num_queues(edev);
1906        if (rc)
1907                goto out;
1908
1909        rc = qede_alloc_fp_array(edev);
1910        if (rc)
1911                goto out;
1912
1913        qede_init_fp(edev);
1914
1915        rc = qede_alloc_mem_load(edev);
1916        if (rc)
1917                goto err1;
1918        DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
1919                QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
1920
1921        rc = qede_set_real_num_queues(edev);
1922        if (rc)
1923                goto err2;
1924
1925        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1926                rc = qede_alloc_arfs(edev);
1927                if (rc)
1928                        DP_NOTICE(edev, "aRFS memory allocation failed\n");
1929        }
1930
1931        qede_napi_add_enable(edev);
1932        DP_INFO(edev, "Napi added and enabled\n");
1933
1934        rc = qede_setup_irqs(edev);
1935        if (rc)
1936                goto err3;
1937        DP_INFO(edev, "Setup IRQs succeeded\n");
1938
1939        rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
1940        if (rc)
1941                goto err4;
1942        DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
1943
1944        /* Program un-configured VLANs */
1945        qede_configure_vlan_filters(edev);
1946
1947        /* Ask for link-up using current configuration */
1948        memset(&link_params, 0, sizeof(link_params));
1949        link_params.link_up = true;
1950        edev->ops->common->set_link(edev->cdev, &link_params);
1951
1952        qede_rdma_dev_event_open(edev);
1953
1954        edev->state = QEDE_STATE_OPEN;
1955
1956        DP_INFO(edev, "Ending successfully qede load\n");
1957
1958        goto out;
1959err4:
1960        qede_sync_free_irqs(edev);
1961        memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
1962err3:
1963        qede_napi_disable_remove(edev);
1964err2:
1965        qede_free_mem_load(edev);
1966err1:
1967        edev->ops->common->set_fp_int(edev->cdev, 0);
1968        qede_free_fp_array(edev);
1969        edev->num_queues = 0;
1970        edev->fp_num_tx = 0;
1971        edev->fp_num_rx = 0;
1972out:
1973        if (!is_locked)
1974                __qede_unlock(edev);
1975
1976        return rc;
1977}
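
/* Illustrative note (not in the original source): the err4..err1 labels
 * above unwind strictly in reverse order of acquisition, so each failure
 * point releases exactly what the preceding steps set up and leaves the
 * queue counters zeroed for the next load attempt.
 */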
1978
1979/* 'func' runs between unload and reload when the interface is actually
1980 * running, or on its own in case the interface is currently DOWN.
1981 */
1982void qede_reload(struct qede_dev *edev,
1983                 struct qede_reload_args *args, bool is_locked)
1984{
1985        if (!is_locked)
1986                __qede_lock(edev);
1987
1988        /* Since qede_lock is held, the internal state can't change even
1989         * if the netdev state starts transitioning. If the current internal
1990         * configuration indicates the device is up, unload and reload it.
1991         */
1992        if (edev->state == QEDE_STATE_OPEN) {
1993                qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
1994                if (args)
1995                        args->func(edev, args);
1996                qede_load(edev, QEDE_LOAD_RELOAD, true);
1997
1998                /* Since no one is going to do it for us, re-configure */
1999                qede_config_rx_mode(edev->ndev);
2000        } else if (args) {
2001                args->func(edev, args);
2002        }
2003
2004        if (!is_locked)
2005                __qede_unlock(edev);
2006}
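
/* Illustrative sketch (not in the original source; the callback is
 * hypothetical but matches the struct qede_reload_args contract): a config
 * change such as MTU can be funneled through qede_reload() so it is
 * applied while the device is down:
 *
 *	static void qede_update_mtu(struct qede_dev *edev,
 *				    struct qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->u.mtu;
 *	}
 *
 *	args.u.mtu = new_mtu;
 *	args.func = qede_update_mtu;
 *	qede_reload(edev, &args, false);
 */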
2007
2008/* called with rtnl_lock */
2009static int qede_open(struct net_device *ndev)
2010{
2011        struct qede_dev *edev = netdev_priv(ndev);
2012        int rc;
2013
2014        netif_carrier_off(ndev);
2015
2016        edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2017
2018        rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2019        if (rc)
2020                return rc;
2021
2022        udp_tunnel_get_rx_info(ndev);
2023
2024        edev->ops->common->update_drv_state(edev->cdev, true);
2025
2026        return 0;
2027}
2028
2029static int qede_close(struct net_device *ndev)
2030{
2031        struct qede_dev *edev = netdev_priv(ndev);
2032
2033        qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2034
2035        edev->ops->common->update_drv_state(edev->cdev, false);
2036
2037        return 0;
2038}
2039
2040static void qede_link_update(void *dev, struct qed_link_output *link)
2041{
2042        struct qede_dev *edev = dev;
2043
2044        if (!netif_running(edev->ndev)) {
2045                DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
2046                return;
2047        }
2048
2049        if (link->link_up) {
2050                if (!netif_carrier_ok(edev->ndev)) {
2051                        DP_NOTICE(edev, "Link is up\n");
2052                        netif_tx_start_all_queues(edev->ndev);
2053                        netif_carrier_on(edev->ndev);
2054                }
2055        } else {
2056                if (netif_carrier_ok(edev->ndev)) {
2057                        DP_NOTICE(edev, "Link is down\n");
2058                        netif_tx_disable(edev->ndev);
2059                        netif_carrier_off(edev->ndev);
2060                }
2061        }
2062}
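
/* Illustrative note (not in the original source): gating on
 * netif_carrier_ok() makes this callback idempotent - repeated "up" or
 * "down" notifications from qed neither restart the Tx queues nor log
 * duplicate messages.
 */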
2063