linux/drivers/net/ethernet/neterion/vxge/vxge-main.c
   1/******************************************************************************
   2* This software may be used and distributed according to the terms of
   3* the GNU General Public License (GPL), incorporated herein by reference.
   4* Drivers based on or derived from this code fall under the GPL and must
   5* retain the authorship, copyright and license notice.  This file is not
   6* a complete program and may only be used when the entire operating
   7* system is licensed under the GPL.
   8* See the file COPYING in this distribution for more information.
   9*
  10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
  11*              Virtualized Server Adapter.
  12* Copyright(c) 2002-2010 Exar Corp.
  13*
  14* The module loadable parameters that are supported by the driver and a brief
  15* explanation of all the variables:
  16* vlan_tag_strip:
  17*       Strip VLAN Tag enable/disable. Instructs the device to remove
  18*       the VLAN tag from all received tagged frames that are not
  19*       replicated at the internal L2 switch.
  20*               0 - Do not strip the VLAN tag.
  21*               1 - Strip the VLAN tag.
  22*
  23* addr_learn_en:
  24*       Enable learning the mac address of the guest OS interface in
  25*       a virtualization environment.
  26*               0 - DISABLE
  27*               1 - ENABLE
  28*
  29* max_config_port:
   30*       Maximum number of ports to be supported.
   31*               MIN - 1 and MAX - 2
  32*
  33* max_config_vpath:
   34*       This configures the maximum number of VPATHs configured for each
   35*       device function.
  36*               MIN - 1 and MAX - 17
  37*
  38* max_config_dev:
   39*       This configures the maximum number of device functions to be enabled.
  40*               MIN - 1 and MAX - 17
  41*
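     * Example module load (illustrative only; the values shown are not
     * defaults and depend on the configuration):
     *       modprobe vxge vlan_tag_strip=1 addr_learn_en=1 max_config_vpath=4
     *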
  42******************************************************************************/
  43
  44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45
  46#include <linux/bitops.h>
  47#include <linux/if_vlan.h>
  48#include <linux/interrupt.h>
  49#include <linux/pci.h>
  50#include <linux/slab.h>
  51#include <linux/tcp.h>
  52#include <net/ip.h>
  53#include <linux/netdevice.h>
  54#include <linux/etherdevice.h>
  55#include <linux/firmware.h>
  56#include <linux/net_tstamp.h>
  57#include <linux/prefetch.h>
  58#include <linux/module.h>
  59#include "vxge-main.h"
  60#include "vxge-reg.h"
  61
  62MODULE_LICENSE("Dual BSD/GPL");
   63MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
   64        "Virtualized Server Adapter");
  65
  66static const struct pci_device_id vxge_id_table[] = {
  67        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
  68        PCI_ANY_ID},
  69        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
  70        PCI_ANY_ID},
  71        {0}
  72};
  73
  74MODULE_DEVICE_TABLE(pci, vxge_id_table);
  75
  76VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
  77VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
  78VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
  79VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
  80VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
  81VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
  82
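     /*
      * vpath_selector[n - 1] is the smallest power-of-two-minus-one mask that
      * covers n vpaths.  vxge_get_vpath_no() below ANDs the sum of a flow's
      * TCP source and destination ports with this mask to pick one of the n
      * configured vpaths (clamped to n - 1).
      */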
  83static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
  84                {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
  85static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
  86        {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
  87module_param_array(bw_percentage, uint, NULL, 0);
  88
  89static struct vxge_drv_config *driver_config;
  90static void vxge_reset_all_vpaths(struct vxgedev *vdev);
  91
  92static inline int is_vxge_card_up(struct vxgedev *vdev)
  93{
  94        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
  95}
  96
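     /*
      * Reap completed TxDs from a fifo, up to NR_SKB_COMPLETED at a time, and
      * free the collected skbs outside the tx queue lock.
      */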
  97static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
  98{
  99        struct sk_buff **skb_ptr = NULL;
 100        struct sk_buff **temp;
 101#define NR_SKB_COMPLETED 16
 102        struct sk_buff *completed[NR_SKB_COMPLETED];
 103        int more;
 104
 105        do {
 106                more = 0;
 107                skb_ptr = completed;
 108
 109                if (__netif_tx_trylock(fifo->txq)) {
 110                        vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
 111                                                NR_SKB_COMPLETED, &more);
 112                        __netif_tx_unlock(fifo->txq);
 113                }
 114
 115                /* free SKBs */
 116                for (temp = completed; temp != skb_ptr; temp++)
 117                        dev_consume_skb_irq(*temp);
 118        } while (more);
 119}
 120
 121static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
 122{
 123        int i;
 124
 125        /* Complete all transmits */
 126        for (i = 0; i < vdev->no_of_vpath; i++)
 127                VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
 128}
 129
 130static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
 131{
 132        int i;
 133        struct vxge_ring *ring;
 134
 135        /* Complete all receives*/
 136        for (i = 0; i < vdev->no_of_vpath; i++) {
 137                ring = &vdev->vpaths[i].ring;
 138                vxge_hw_vpath_poll_rx(ring->handle);
 139        }
 140}
 141
 142/*
 143 * vxge_callback_link_up
 144 *
 145 * This function is called during interrupt context to notify link up state
 146 * change.
 147 */
 148static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
 149{
 150        struct net_device *dev = hldev->ndev;
 151        struct vxgedev *vdev = netdev_priv(dev);
 152
 153        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 154                vdev->ndev->name, __func__, __LINE__);
 155        netdev_notice(vdev->ndev, "Link Up\n");
 156        vdev->stats.link_up++;
 157
 158        netif_carrier_on(vdev->ndev);
 159        netif_tx_wake_all_queues(vdev->ndev);
 160
 161        vxge_debug_entryexit(VXGE_TRACE,
 162                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
 163}
 164
 165/*
 166 * vxge_callback_link_down
 167 *
 168 * This function is called during interrupt context to notify link down state
 169 * change.
 170 */
 171static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
 172{
 173        struct net_device *dev = hldev->ndev;
 174        struct vxgedev *vdev = netdev_priv(dev);
 175
 176        vxge_debug_entryexit(VXGE_TRACE,
 177                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
 178        netdev_notice(vdev->ndev, "Link Down\n");
 179
 180        vdev->stats.link_down++;
 181        netif_carrier_off(vdev->ndev);
 182        netif_tx_stop_all_queues(vdev->ndev);
 183
 184        vxge_debug_entryexit(VXGE_TRACE,
 185                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
 186}
 187
 188/*
 189 * vxge_rx_alloc
 190 *
 191 * Allocate SKB.
 192 */
 193static struct sk_buff *
 194vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
 195{
 196        struct net_device    *dev;
 197        struct sk_buff       *skb;
 198        struct vxge_rx_priv *rx_priv;
 199
 200        dev = ring->ndev;
 201        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 202                ring->ndev->name, __func__, __LINE__);
 203
 204        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 205
 206        /* try to allocate skb first. this one may fail */
 207        skb = netdev_alloc_skb(dev, skb_size +
 208        VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 209        if (skb == NULL) {
 210                vxge_debug_mem(VXGE_ERR,
 211                        "%s: out of memory to allocate SKB", dev->name);
 212                ring->stats.skb_alloc_fail++;
 213                return NULL;
 214        }
 215
 216        vxge_debug_mem(VXGE_TRACE,
 217                "%s: %s:%d  Skb : 0x%p", ring->ndev->name,
 218                __func__, __LINE__, skb);
 219
 220        skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 221
 222        rx_priv->skb = skb;
 223        rx_priv->skb_data = NULL;
 224        rx_priv->data_size = skb_size;
 225        vxge_debug_entryexit(VXGE_TRACE,
 226                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
 227
 228        return skb;
 229}
 230
 231/*
 232 * vxge_rx_map
 233 */
 234static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
 235{
 236        struct vxge_rx_priv *rx_priv;
 237        dma_addr_t dma_addr;
 238
 239        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 240                ring->ndev->name, __func__, __LINE__);
 241        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 242
 243        rx_priv->skb_data = rx_priv->skb->data;
 244        dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
 245                                  rx_priv->data_size, DMA_FROM_DEVICE);
 246
 247        if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
 248                ring->stats.pci_map_fail++;
 249                return -EIO;
 250        }
 251        vxge_debug_mem(VXGE_TRACE,
 252                "%s: %s:%d  1 buffer mode dma_addr = 0x%llx",
 253                ring->ndev->name, __func__, __LINE__,
 254                (unsigned long long)dma_addr);
 255        vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
 256
 257        rx_priv->data_dma = dma_addr;
 258        vxge_debug_entryexit(VXGE_TRACE,
 259                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
 260
 261        return 0;
 262}
 263
 264/*
 265 * vxge_rx_initial_replenish
 266 * Allocation of RxD as an initial replenish procedure.
 267 */
 268static enum vxge_hw_status
 269vxge_rx_initial_replenish(void *dtrh, void *userdata)
 270{
 271        struct vxge_ring *ring = (struct vxge_ring *)userdata;
 272        struct vxge_rx_priv *rx_priv;
 273
 274        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 275                ring->ndev->name, __func__, __LINE__);
 276        if (vxge_rx_alloc(dtrh, ring,
 277                          VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
 278                return VXGE_HW_FAIL;
 279
 280        if (vxge_rx_map(dtrh, ring)) {
 281                rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 282                dev_kfree_skb(rx_priv->skb);
 283
 284                return VXGE_HW_FAIL;
 285        }
 286        vxge_debug_entryexit(VXGE_TRACE,
 287                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
 288
 289        return VXGE_HW_OK;
 290}
 291
 292static inline void
 293vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 294                 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
 295{
 296
 297        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 298                        ring->ndev->name, __func__, __LINE__);
 299        skb_record_rx_queue(skb, ring->driver_id);
 300        skb->protocol = eth_type_trans(skb, ring->ndev);
 301
 302        u64_stats_update_begin(&ring->stats.syncp);
 303        ring->stats.rx_frms++;
 304        ring->stats.rx_bytes += pkt_length;
 305
 306        if (skb->pkt_type == PACKET_MULTICAST)
 307                ring->stats.rx_mcast++;
 308        u64_stats_update_end(&ring->stats.syncp);
 309
 310        vxge_debug_rx(VXGE_TRACE,
 311                "%s: %s:%d  skb protocol = %d",
 312                ring->ndev->name, __func__, __LINE__, skb->protocol);
 313
 314        if (ext_info->vlan &&
 315            ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
 316                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
 317        napi_gro_receive(ring->napi_p, skb);
 318
 319        vxge_debug_entryexit(VXGE_TRACE,
 320                "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
 321}
 322
 323static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
 324                                    struct vxge_rx_priv *rx_priv)
 325{
 326        dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
 327                                   rx_priv->data_size, DMA_FROM_DEVICE);
 328
 329        vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
 330        vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
 331}
 332
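     /*
      * Return a descriptor to the ring.  Every VXGE_HW_RXSYNC_FREQ_CNT
      * descriptors the previously saved batch marker is posted with a write
      * memory barrier and the current descriptor becomes the new marker;
      * other descriptors are posted without the barrier.  The caller flushes
      * the last marker once its completion loop finishes.
      */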
 333static inline void vxge_post(int *dtr_cnt, void **first_dtr,
 334                             void *post_dtr, struct __vxge_hw_ring *ringh)
 335{
 336        int dtr_count = *dtr_cnt;
 337        if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
 338                if (*first_dtr)
 339                        vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
 340                *first_dtr = post_dtr;
 341        } else
 342                vxge_hw_ring_rxd_post_post(ringh, post_dtr);
 343        dtr_count++;
 344        *dtr_cnt = dtr_count;
 345}
 346
 347/*
 348 * vxge_rx_1b_compl
 349 *
 350 * If the interrupt is because of a received frame or if the receive ring
 351 * contains fresh as yet un-processed frames, this function is called.
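      *
      * Frames longer than VXGE_LL_RX_COPY_THRESHOLD are handed up in their
      * original buffer and a replacement skb is allocated for the RxD;
      * shorter frames are copied into a freshly allocated skb and the
      * original buffer is re-posted to the ring.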
 352 */
 353static enum vxge_hw_status
 354vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 355                 u8 t_code, void *userdata)
 356{
 357        struct vxge_ring *ring = (struct vxge_ring *)userdata;
 358        struct net_device *dev = ring->ndev;
 359        unsigned int dma_sizes;
 360        void *first_dtr = NULL;
 361        int dtr_cnt = 0;
 362        int data_size;
 363        dma_addr_t data_dma;
 364        int pkt_length;
 365        struct sk_buff *skb;
 366        struct vxge_rx_priv *rx_priv;
 367        struct vxge_hw_ring_rxd_info ext_info;
 368        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 369                ring->ndev->name, __func__, __LINE__);
 370
 371        if (ring->budget <= 0)
 372                goto out;
 373
 374        do {
 375                prefetch((char *)dtr + L1_CACHE_BYTES);
 376                rx_priv = vxge_hw_ring_rxd_private_get(dtr);
 377                skb = rx_priv->skb;
 378                data_size = rx_priv->data_size;
 379                data_dma = rx_priv->data_dma;
 380                prefetch(rx_priv->skb_data);
 381
 382                vxge_debug_rx(VXGE_TRACE,
 383                        "%s: %s:%d  skb = 0x%p",
 384                        ring->ndev->name, __func__, __LINE__, skb);
 385
 386                vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
 387                pkt_length = dma_sizes;
 388
 389                pkt_length -= ETH_FCS_LEN;
 390
 391                vxge_debug_rx(VXGE_TRACE,
 392                        "%s: %s:%d  Packet Length = %d",
 393                        ring->ndev->name, __func__, __LINE__, pkt_length);
 394
 395                vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
 396
 397                /* check skb validity */
 398                vxge_assert(skb);
 399
 400                prefetch((char *)skb + L1_CACHE_BYTES);
 401                if (unlikely(t_code)) {
 402                        if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
 403                                VXGE_HW_OK) {
 404
 405                                ring->stats.rx_errors++;
 406                                vxge_debug_rx(VXGE_TRACE,
 407                                        "%s: %s :%d Rx T_code is %d",
 408                                        ring->ndev->name, __func__,
 409                                        __LINE__, t_code);
 410
  411                                /* If the t_code is not supported and is
  412                                 * other than 0x5 (an unparseable packet,
  413                                 * such as an unknown IPv6 header), drop it.
 414                                 */
 415                                vxge_re_pre_post(dtr, ring, rx_priv);
 416
 417                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
 418                                ring->stats.rx_dropped++;
 419                                continue;
 420                        }
 421                }
 422
 423                if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
 424                        if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
 425                                if (!vxge_rx_map(dtr, ring)) {
 426                                        skb_put(skb, pkt_length);
 427
 428                                        dma_unmap_single(&ring->pdev->dev,
 429                                                         data_dma, data_size,
 430                                                         DMA_FROM_DEVICE);
 431
 432                                        vxge_hw_ring_rxd_pre_post(ringh, dtr);
 433                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
 434                                                ringh);
 435                                } else {
 436                                        dev_kfree_skb(rx_priv->skb);
 437                                        rx_priv->skb = skb;
 438                                        rx_priv->data_size = data_size;
 439                                        vxge_re_pre_post(dtr, ring, rx_priv);
 440
 441                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
 442                                                ringh);
 443                                        ring->stats.rx_dropped++;
 444                                        break;
 445                                }
 446                        } else {
 447                                vxge_re_pre_post(dtr, ring, rx_priv);
 448
 449                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
 450                                ring->stats.rx_dropped++;
 451                                break;
 452                        }
 453                } else {
 454                        struct sk_buff *skb_up;
 455
 456                        skb_up = netdev_alloc_skb(dev, pkt_length +
 457                                VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 458                        if (skb_up != NULL) {
 459                                skb_reserve(skb_up,
 460                                    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 461
 462                                dma_sync_single_for_cpu(&ring->pdev->dev,
 463                                                        data_dma, data_size,
 464                                                        DMA_FROM_DEVICE);
 465
 466                                vxge_debug_mem(VXGE_TRACE,
 467                                        "%s: %s:%d  skb_up = %p",
 468                                        ring->ndev->name, __func__,
 469                                        __LINE__, skb);
 470                                memcpy(skb_up->data, skb->data, pkt_length);
 471
 472                                vxge_re_pre_post(dtr, ring, rx_priv);
 473
 474                                vxge_post(&dtr_cnt, &first_dtr, dtr,
 475                                        ringh);
  476                                /* pass up the copied small skb instead */
 477                                skb = skb_up;
 478                                skb_put(skb, pkt_length);
 479                        } else {
 480                                vxge_re_pre_post(dtr, ring, rx_priv);
 481
 482                                vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
 483                                vxge_debug_rx(VXGE_ERR,
 484                                        "%s: vxge_rx_1b_compl: out of "
 485                                        "memory", dev->name);
 486                                ring->stats.skb_alloc_fail++;
 487                                break;
 488                        }
 489                }
 490
 491                if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
 492                    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
 493                    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
 494                    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
 495                    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
 496                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 497                else
 498                        skb_checksum_none_assert(skb);
 499
 500
 501                if (ring->rx_hwts) {
 502                        struct skb_shared_hwtstamps *skb_hwts;
 503                        u32 ns = *(u32 *)(skb->head + pkt_length);
 504
 505                        skb_hwts = skb_hwtstamps(skb);
 506                        skb_hwts->hwtstamp = ns_to_ktime(ns);
 507                }
 508
 509                /* rth_hash_type and rth_it_hit are non-zero regardless of
 510                 * whether rss is enabled.  Only the rth_value is zero/non-zero
 511                 * if rss is disabled/enabled, so key off of that.
 512                 */
 513                if (ext_info.rth_value)
 514                        skb_set_hash(skb, ext_info.rth_value,
 515                                     PKT_HASH_TYPE_L3);
 516
 517                vxge_rx_complete(ring, skb, ext_info.vlan,
 518                        pkt_length, &ext_info);
 519
 520                ring->budget--;
 521                ring->pkts_processed++;
 522                if (!ring->budget)
 523                        break;
 524
 525        } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
 526                &t_code) == VXGE_HW_OK);
 527
 528        if (first_dtr)
 529                vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
 530
 531out:
 532        vxge_debug_entryexit(VXGE_TRACE,
 533                                "%s:%d  Exiting...",
 534                                __func__, __LINE__);
 535        return VXGE_HW_OK;
 536}
 537
 538/*
 539 * vxge_xmit_compl
 540 *
 541 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 542 * this function is called. It identifies the last TxD whose buffer was
  543 * freed and frees all skbs whose data has already been DMA'ed into the
  544 * NIC's internal memory.
 545 */
 546static enum vxge_hw_status
 547vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 548                enum vxge_hw_fifo_tcode t_code, void *userdata,
 549                struct sk_buff ***skb_ptr, int nr_skb, int *more)
 550{
 551        struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
 552        struct sk_buff *skb, **done_skb = *skb_ptr;
 553        int pkt_cnt = 0;
 554
 555        vxge_debug_entryexit(VXGE_TRACE,
 556                "%s:%d Entered....", __func__, __LINE__);
 557
 558        do {
 559                int frg_cnt;
 560                skb_frag_t *frag;
 561                int i = 0, j;
 562                struct vxge_tx_priv *txd_priv =
 563                        vxge_hw_fifo_txdl_private_get(dtr);
 564
 565                skb = txd_priv->skb;
 566                frg_cnt = skb_shinfo(skb)->nr_frags;
 567                frag = &skb_shinfo(skb)->frags[0];
 568
 569                vxge_debug_tx(VXGE_TRACE,
 570                                "%s: %s:%d fifo_hw = %p dtr = %p "
 571                                "tcode = 0x%x", fifo->ndev->name, __func__,
 572                                __LINE__, fifo_hw, dtr, t_code);
 573                /* check skb validity */
 574                vxge_assert(skb);
 575                vxge_debug_tx(VXGE_TRACE,
  576                        "%s: %s:%d skb = %p txd_priv = %p frg_cnt = %d",
 577                        fifo->ndev->name, __func__, __LINE__,
 578                        skb, txd_priv, frg_cnt);
 579                if (unlikely(t_code)) {
 580                        fifo->stats.tx_errors++;
 581                        vxge_debug_tx(VXGE_ERR,
 582                                "%s: tx: dtr %p completed due to "
 583                                "error t_code %01x", fifo->ndev->name,
 584                                dtr, t_code);
 585                        vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
 586                }
 587
 588                /*  for unfragmented skb */
 589                dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
 590                                 skb_headlen(skb), DMA_TO_DEVICE);
 591
 592                for (j = 0; j < frg_cnt; j++) {
 593                        dma_unmap_page(&fifo->pdev->dev,
 594                                       txd_priv->dma_buffers[i++],
 595                                       skb_frag_size(frag), DMA_TO_DEVICE);
 596                        frag += 1;
 597                }
 598
 599                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 600
 601                /* Updating the statistics block */
 602                u64_stats_update_begin(&fifo->stats.syncp);
 603                fifo->stats.tx_frms++;
 604                fifo->stats.tx_bytes += skb->len;
 605                u64_stats_update_end(&fifo->stats.syncp);
 606
 607                *done_skb++ = skb;
 608
 609                if (--nr_skb <= 0) {
 610                        *more = 1;
 611                        break;
 612                }
 613
 614                pkt_cnt++;
 615                if (pkt_cnt > fifo->indicate_max_pkts)
 616                        break;
 617
 618        } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
 619                                &dtr, &t_code) == VXGE_HW_OK);
 620
 621        *skb_ptr = done_skb;
 622        if (netif_tx_queue_stopped(fifo->txq))
 623                netif_tx_wake_queue(fifo->txq);
 624
 625        vxge_debug_entryexit(VXGE_TRACE,
 626                                "%s: %s:%d  Exiting...",
 627                                fifo->ndev->name, __func__, __LINE__);
 628        return VXGE_HW_OK;
 629}
 630
 631/* select a vpath to transmit the packet */
 632static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
 633{
 634        u16 queue_len, counter = 0;
 635        if (skb->protocol == htons(ETH_P_IP)) {
 636                struct iphdr *ip;
 637                struct tcphdr *th;
 638
 639                ip = ip_hdr(skb);
 640
 641                if (!ip_is_fragment(ip)) {
 642                        th = (struct tcphdr *)(((unsigned char *)ip) +
 643                                        ip->ihl*4);
 644
 645                        queue_len = vdev->no_of_vpath;
 646                        counter = (ntohs(th->source) +
 647                                ntohs(th->dest)) &
 648                                vdev->vpath_selector[queue_len - 1];
 649                        if (counter >= queue_len)
 650                                counter = queue_len - 1;
 651                }
 652        }
 653        return counter;
 654}
 655
 656static enum vxge_hw_status vxge_search_mac_addr_in_list(
 657        struct vxge_vpath *vpath, u64 del_mac)
 658{
 659        struct list_head *entry, *next;
 660        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
 661                if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
 662                        return TRUE;
 663        }
 664        return FALSE;
 665}
 666
 667static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 668{
 669        struct vxge_mac_addrs *new_mac_entry;
 670        u8 *mac_address = NULL;
 671
 672        if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
 673                return TRUE;
 674
 675        new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
 676        if (!new_mac_entry) {
 677                vxge_debug_mem(VXGE_ERR,
 678                        "%s: memory allocation failed",
 679                        VXGE_DRIVER_NAME);
 680                return FALSE;
 681        }
 682
 683        list_add(&new_mac_entry->item, &vpath->mac_addr_list);
 684
 685        /* Copy the new mac address to the list */
 686        mac_address = (u8 *)&new_mac_entry->macaddr;
 687        memcpy(mac_address, mac->macaddr, ETH_ALEN);
 688
 689        new_mac_entry->state = mac->state;
 690        vpath->mac_addr_cnt++;
 691
 692        if (is_multicast_ether_addr(mac->macaddr))
 693                vpath->mcast_addr_cnt++;
 694
 695        return TRUE;
 696}
 697
 698/* Add a mac address to DA table */
 699static enum vxge_hw_status
 700vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
 701{
 702        enum vxge_hw_status status = VXGE_HW_OK;
 703        struct vxge_vpath *vpath;
 704        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
 705
 706        if (is_multicast_ether_addr(mac->macaddr))
 707                duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
 708        else
 709                duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
 710
 711        vpath = &vdev->vpaths[mac->vpath_no];
 712        status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
 713                                                mac->macmask, duplicate_mode);
 714        if (status != VXGE_HW_OK) {
 715                vxge_debug_init(VXGE_ERR,
 716                        "DA config add entry failed for vpath:%d",
 717                        vpath->device_id);
 718        } else
 719                if (FALSE == vxge_mac_list_add(vpath, mac))
 720                        status = -EPERM;
 721
 722        return status;
 723}
 724
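     /*
      * vxge_learn_mac
      *
      * Learn the source MAC address of a transmitted frame.  If the address
      * is not yet known, it is added to the DA table of a vpath that still
      * has room; once every DA table is full, vpath 0 is programmed as a
      * catch-basin and the address is only tracked in the driver's list.
      * Returns the vpath index to transmit on, or -EPERM on failure.
      */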
 725static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
 726{
 727        struct macInfo mac_info;
 728        u8 *mac_address = NULL;
 729        u64 mac_addr = 0, vpath_vector = 0;
 730        int vpath_idx = 0;
 731        enum vxge_hw_status status = VXGE_HW_OK;
 732        struct vxge_vpath *vpath = NULL;
 733
 734        mac_address = (u8 *)&mac_addr;
 735        memcpy(mac_address, mac_header, ETH_ALEN);
 736
 737        /* Is this mac address already in the list? */
 738        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
 739                vpath = &vdev->vpaths[vpath_idx];
 740                if (vxge_search_mac_addr_in_list(vpath, mac_addr))
 741                        return vpath_idx;
 742        }
 743
 744        memset(&mac_info, 0, sizeof(struct macInfo));
 745        memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
 746
  747        /* Does any vpath have room to add this mac address to its DA table? */
 748        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
 749                vpath = &vdev->vpaths[vpath_idx];
 750                if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
 751                        /* Add this mac address to this vpath */
 752                        mac_info.vpath_no = vpath_idx;
 753                        mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
 754                        status = vxge_add_mac_addr(vdev, &mac_info);
 755                        if (status != VXGE_HW_OK)
 756                                return -EPERM;
 757                        return vpath_idx;
 758                }
 759        }
 760
 761        mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
 762        vpath_idx = 0;
 763        mac_info.vpath_no = vpath_idx;
  764        /* Is the first vpath already selected as the catch-basin? */
 765        vpath = &vdev->vpaths[vpath_idx];
 766        if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
 767                /* Add this mac address to this vpath */
 768                if (FALSE == vxge_mac_list_add(vpath, &mac_info))
 769                        return -EPERM;
 770                return vpath_idx;
 771        }
 772
 773        /* Select first vpath as catch-basin */
 774        vpath_vector = vxge_mBIT(vpath->device_id);
 775        status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
 776                                vxge_hw_mgmt_reg_type_mrpcim,
 777                                0,
 778                                (ulong)offsetof(
 779                                        struct vxge_hw_mrpcim_reg,
 780                                        rts_mgr_cbasin_cfg),
 781                                vpath_vector);
 782        if (status != VXGE_HW_OK) {
 783                vxge_debug_tx(VXGE_ERR,
 784                        "%s: Unable to set the vpath-%d in catch-basin mode",
 785                        VXGE_DRIVER_NAME, vpath->device_id);
 786                return -EPERM;
 787        }
 788
 789        if (FALSE == vxge_mac_list_add(vpath, &mac_info))
 790                return -EPERM;
 791
 792        return vpath_idx;
 793}
 794
 795/**
 796 * vxge_xmit
 797 * @skb : the socket buffer containing the Tx data.
 798 * @dev : device pointer.
 799 *
  800 * This function is the Tx entry point of the driver. The Neterion NIC supports
  801 * certain protocol assist features on the Tx side, namely CSO, S/G and LSO.
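      *
      * Returns NETDEV_TX_OK when the skb has been queued to the hardware or
      * dropped on error, or NETDEV_TX_BUSY when the selected fifo's tx queue
      * is stopped.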
 802*/
 803static netdev_tx_t
 804vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 805{
 806        struct vxge_fifo *fifo = NULL;
 807        void *dtr_priv;
 808        void *dtr = NULL;
 809        struct vxgedev *vdev = NULL;
 810        enum vxge_hw_status status;
 811        int frg_cnt, first_frg_len;
 812        skb_frag_t *frag;
 813        int i = 0, j = 0, avail;
 814        u64 dma_pointer;
 815        struct vxge_tx_priv *txdl_priv = NULL;
 816        struct __vxge_hw_fifo *fifo_hw;
 817        int offload_type;
 818        int vpath_no = 0;
 819
 820        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 821                        dev->name, __func__, __LINE__);
 822
 823        /* A buffer with no data will be dropped */
 824        if (unlikely(skb->len <= 0)) {
 825                vxge_debug_tx(VXGE_ERR,
 826                        "%s: Buffer has no data..", dev->name);
 827                dev_kfree_skb_any(skb);
 828                return NETDEV_TX_OK;
 829        }
 830
 831        vdev = netdev_priv(dev);
 832
 833        if (unlikely(!is_vxge_card_up(vdev))) {
 834                vxge_debug_tx(VXGE_ERR,
 835                        "%s: vdev not initialized", dev->name);
 836                dev_kfree_skb_any(skb);
 837                return NETDEV_TX_OK;
 838        }
 839
 840        if (vdev->config.addr_learn_en) {
 841                vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
 842                if (vpath_no == -EPERM) {
 843                        vxge_debug_tx(VXGE_ERR,
 844                                "%s: Failed to store the mac address",
 845                                dev->name);
 846                        dev_kfree_skb_any(skb);
 847                        return NETDEV_TX_OK;
 848                }
 849        }
 850
 851        if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
 852                vpath_no = skb_get_queue_mapping(skb);
 853        else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
 854                vpath_no = vxge_get_vpath_no(vdev, skb);
 855
 856        vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
 857
 858        if (vpath_no >= vdev->no_of_vpath)
 859                vpath_no = 0;
 860
 861        fifo = &vdev->vpaths[vpath_no].fifo;
 862        fifo_hw = fifo->handle;
 863
 864        if (netif_tx_queue_stopped(fifo->txq))
 865                return NETDEV_TX_BUSY;
 866
 867        avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
 868        if (avail == 0) {
 869                vxge_debug_tx(VXGE_ERR,
 870                        "%s: No free TXDs available", dev->name);
 871                fifo->stats.txd_not_free++;
 872                goto _exit0;
 873        }
 874
 875        /* Last TXD?  Stop tx queue to avoid dropping packets.  TX
 876         * completion will resume the queue.
 877         */
 878        if (avail == 1)
 879                netif_tx_stop_queue(fifo->txq);
 880
 881        status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
 882        if (unlikely(status != VXGE_HW_OK)) {
 883                vxge_debug_tx(VXGE_ERR,
  884                   "%s: Out of descriptors.", dev->name);
 885                fifo->stats.txd_out_of_desc++;
 886                goto _exit0;
 887        }
 888
 889        vxge_debug_tx(VXGE_TRACE,
 890                "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
 891                dev->name, __func__, __LINE__,
 892                fifo_hw, dtr, dtr_priv);
 893
 894        if (skb_vlan_tag_present(skb)) {
 895                u16 vlan_tag = skb_vlan_tag_get(skb);
 896                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
 897        }
 898
 899        first_frg_len = skb_headlen(skb);
 900
 901        dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
 902                                     first_frg_len, DMA_TO_DEVICE);
 903
 904        if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
 905                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 906                fifo->stats.pci_map_fail++;
 907                goto _exit0;
 908        }
 909
 910        txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
 911        txdl_priv->skb = skb;
 912        txdl_priv->dma_buffers[j] = dma_pointer;
 913
 914        frg_cnt = skb_shinfo(skb)->nr_frags;
 915        vxge_debug_tx(VXGE_TRACE,
 916                        "%s: %s:%d skb = %p txdl_priv = %p "
 917                        "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
 918                        __func__, __LINE__, skb, txdl_priv,
 919                        frg_cnt, (unsigned long long)dma_pointer);
 920
 921        vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
 922                first_frg_len);
 923
 924        frag = &skb_shinfo(skb)->frags[0];
 925        for (i = 0; i < frg_cnt; i++) {
 926                /* ignore 0 length fragment */
 927                if (!skb_frag_size(frag))
 928                        continue;
 929
 930                dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
 931                                                    0, skb_frag_size(frag),
 932                                                    DMA_TO_DEVICE);
 933
 934                if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
 935                        goto _exit2;
 936                vxge_debug_tx(VXGE_TRACE,
 937                        "%s: %s:%d frag = %d dma_pointer = 0x%llx",
 938                                dev->name, __func__, __LINE__, i,
 939                                (unsigned long long)dma_pointer);
 940
 941                txdl_priv->dma_buffers[j] = dma_pointer;
 942                vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
 943                                        skb_frag_size(frag));
 944                frag += 1;
 945        }
 946
 947        offload_type = vxge_offload_type(skb);
 948
 949        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 950                int mss = vxge_tcp_mss(skb);
 951                if (mss) {
 952                        vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
 953                                dev->name, __func__, __LINE__, mss);
 954                        vxge_hw_fifo_txdl_mss_set(dtr, mss);
 955                } else {
 956                        vxge_assert(skb->len <=
 957                                dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
 958                        vxge_assert(0);
 959                        goto _exit1;
 960                }
 961        }
 962
 963        if (skb->ip_summed == CHECKSUM_PARTIAL)
 964                vxge_hw_fifo_txdl_cksum_set_bits(dtr,
 965                                        VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 966                                        VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
 967                                        VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
 968
 969        vxge_hw_fifo_txdl_post(fifo_hw, dtr);
 970
 971        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
 972                dev->name, __func__, __LINE__);
 973        return NETDEV_TX_OK;
 974
 975_exit2:
 976        vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
 977_exit1:
 978        j = 0;
 979        frag = &skb_shinfo(skb)->frags[0];
 980
 981        dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
 982                         skb_headlen(skb), DMA_TO_DEVICE);
 983
 984        for (; j < i; j++) {
 985                dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
 986                               skb_frag_size(frag), DMA_TO_DEVICE);
 987                frag += 1;
 988        }
 989
 990        vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 991_exit0:
 992        netif_tx_stop_queue(fifo->txq);
 993        dev_kfree_skb_any(skb);
 994
 995        return NETDEV_TX_OK;
 996}
 997
 998/*
 999 * vxge_rx_term
1000 *
1001 * Function will be called by hw function to abort all outstanding receive
1002 * descriptors.
1003 */
1004static void
1005vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1006{
1007        struct vxge_ring *ring = (struct vxge_ring *)userdata;
1008        struct vxge_rx_priv *rx_priv =
1009                vxge_hw_ring_rxd_private_get(dtrh);
1010
1011        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1012                        ring->ndev->name, __func__, __LINE__);
1013        if (state != VXGE_HW_RXD_STATE_POSTED)
1014                return;
1015
1016        dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
1017                         rx_priv->data_size, DMA_FROM_DEVICE);
1018
1019        dev_kfree_skb(rx_priv->skb);
1020        rx_priv->skb_data = NULL;
1021
1022        vxge_debug_entryexit(VXGE_TRACE,
1023                "%s: %s:%d  Exiting...",
1024                ring->ndev->name, __func__, __LINE__);
1025}
1026
1027/*
1028 * vxge_tx_term
1029 *
1030 * Function will be called to abort all outstanding tx descriptors
1031 */
1032static void
1033vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1034{
1035        struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1036        skb_frag_t *frag;
1037        int i = 0, j, frg_cnt;
1038        struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1039        struct sk_buff *skb = txd_priv->skb;
1040
1041        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1042
1043        if (state != VXGE_HW_TXDL_STATE_POSTED)
1044                return;
1045
1046        /* check skb validity */
1047        vxge_assert(skb);
1048        frg_cnt = skb_shinfo(skb)->nr_frags;
1049        frag = &skb_shinfo(skb)->frags[0];
1050
1051        /*  for unfragmented skb */
1052        dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
1053                         skb_headlen(skb), DMA_TO_DEVICE);
1054
1055        for (j = 0; j < frg_cnt; j++) {
1056                dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
1057                               skb_frag_size(frag), DMA_TO_DEVICE);
1058                frag += 1;
1059        }
1060
1061        dev_kfree_skb(skb);
1062
1063        vxge_debug_entryexit(VXGE_TRACE,
1064                "%s:%d  Exiting...", __func__, __LINE__);
1065}
1066
1067static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1068{
1069        struct list_head *entry, *next;
1070        u64 del_mac = 0;
1071        u8 *mac_address = (u8 *) (&del_mac);
1072
1073        /* Copy the mac address to delete from the list */
1074        memcpy(mac_address, mac->macaddr, ETH_ALEN);
1075
1076        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1077                if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1078                        list_del(entry);
1079                        kfree(entry);
1080                        vpath->mac_addr_cnt--;
1081
1082                        if (is_multicast_ether_addr(mac->macaddr))
1083                                vpath->mcast_addr_cnt--;
1084                        return TRUE;
1085                }
1086        }
1087
1088        return FALSE;
1089}
1090
1091/* delete a mac address from DA table */
1092static enum vxge_hw_status
1093vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1094{
1095        enum vxge_hw_status status = VXGE_HW_OK;
1096        struct vxge_vpath *vpath;
1097
1098        vpath = &vdev->vpaths[mac->vpath_no];
1099        status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1100                                                mac->macmask);
1101        if (status != VXGE_HW_OK) {
1102                vxge_debug_init(VXGE_ERR,
1103                        "DA config delete entry failed for vpath:%d",
1104                        vpath->device_id);
1105        } else
1106                vxge_mac_list_del(vpath, mac);
1107        return status;
1108}
1109
1110/**
1111 * vxge_set_multicast
1112 * @dev: pointer to the device structure
1113 *
1114 * Entry point for multicast address enable/disable
1115 * This function is a driver entry point which gets called by the kernel
1116 * whenever multicast addresses must be enabled/disabled. This also gets
 1117 * called to set/reset promiscuous mode. Depending on the device flags, we
 1118 * determine whether multicast addresses must be enabled or whether
 1119 * promiscuous mode is to be enabled/disabled, etc.
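      * If the individual multicast list cannot be programmed (too many
      * addresses or a hardware failure), the driver falls back to enabling
      * all-multicast on every vpath.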
1120 */
1121static void vxge_set_multicast(struct net_device *dev)
1122{
1123        struct netdev_hw_addr *ha;
1124        struct vxgedev *vdev;
1125        int i, mcast_cnt = 0;
1126        struct vxge_vpath *vpath;
1127        enum vxge_hw_status status = VXGE_HW_OK;
1128        struct macInfo mac_info;
1129        int vpath_idx = 0;
1130        struct vxge_mac_addrs *mac_entry;
1131        struct list_head *list_head;
1132        struct list_head *entry, *next;
1133        u8 *mac_address = NULL;
1134
1135        vxge_debug_entryexit(VXGE_TRACE,
1136                "%s:%d", __func__, __LINE__);
1137
1138        vdev = netdev_priv(dev);
1139
1140        if (unlikely(!is_vxge_card_up(vdev)))
1141                return;
1142
1143        if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1144                for (i = 0; i < vdev->no_of_vpath; i++) {
1145                        vpath = &vdev->vpaths[i];
1146                        vxge_assert(vpath->is_open);
1147                        status = vxge_hw_vpath_mcast_enable(vpath->handle);
1148                        if (status != VXGE_HW_OK)
1149                                vxge_debug_init(VXGE_ERR, "failed to enable "
1150                                                "multicast, status %d", status);
1151                        vdev->all_multi_flg = 1;
1152                }
1153        } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1154                for (i = 0; i < vdev->no_of_vpath; i++) {
1155                        vpath = &vdev->vpaths[i];
1156                        vxge_assert(vpath->is_open);
1157                        status = vxge_hw_vpath_mcast_disable(vpath->handle);
1158                        if (status != VXGE_HW_OK)
1159                                vxge_debug_init(VXGE_ERR, "failed to disable "
1160                                                "multicast, status %d", status);
1161                        vdev->all_multi_flg = 0;
1162                }
1163        }
1164
1165
1166        if (!vdev->config.addr_learn_en) {
1167                for (i = 0; i < vdev->no_of_vpath; i++) {
1168                        vpath = &vdev->vpaths[i];
1169                        vxge_assert(vpath->is_open);
1170
1171                        if (dev->flags & IFF_PROMISC)
1172                                status = vxge_hw_vpath_promisc_enable(
1173                                        vpath->handle);
1174                        else
1175                                status = vxge_hw_vpath_promisc_disable(
1176                                        vpath->handle);
1177                        if (status != VXGE_HW_OK)
1178                                vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1179                                        ", status %d", dev->flags&IFF_PROMISC ?
1180                                        "enable" : "disable", status);
1181                }
1182        }
1183
1184        memset(&mac_info, 0, sizeof(struct macInfo));
1185        /* Update individual M_CAST address list */
1186        if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1187                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1188                list_head = &vdev->vpaths[0].mac_addr_list;
1189                if ((netdev_mc_count(dev) +
1190                        (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1191                                vdev->vpaths[0].max_mac_addr_cnt)
1192                        goto _set_all_mcast;
1193
1194                /* Delete previous MC's */
1195                for (i = 0; i < mcast_cnt; i++) {
1196                        list_for_each_safe(entry, next, list_head) {
1197                                mac_entry = (struct vxge_mac_addrs *)entry;
1198                                /* Copy the mac address to delete */
1199                                mac_address = (u8 *)&mac_entry->macaddr;
1200                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1201
1202                                if (is_multicast_ether_addr(mac_info.macaddr)) {
1203                                        for (vpath_idx = 0; vpath_idx <
1204                                                vdev->no_of_vpath;
1205                                                vpath_idx++) {
1206                                                mac_info.vpath_no = vpath_idx;
1207                                                status = vxge_del_mac_addr(
1208                                                                vdev,
1209                                                                &mac_info);
1210                                        }
1211                                }
1212                        }
1213                }
1214
1215                /* Add new ones */
1216                netdev_for_each_mc_addr(ha, dev) {
1217                        memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1218                        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1219                                        vpath_idx++) {
1220                                mac_info.vpath_no = vpath_idx;
1221                                mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1222                                status = vxge_add_mac_addr(vdev, &mac_info);
1223                                if (status != VXGE_HW_OK) {
1224                                        vxge_debug_init(VXGE_ERR,
 1225                                                "%s:%d Setting individual "
 1226                                                "multicast address failed",
1227                                                __func__, __LINE__);
1228                                        goto _set_all_mcast;
1229                                }
1230                        }
1231                }
1232
1233                return;
1234_set_all_mcast:
1235                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1236                /* Delete previous MC's */
1237                for (i = 0; i < mcast_cnt; i++) {
1238                        list_for_each_safe(entry, next, list_head) {
1239                                mac_entry = (struct vxge_mac_addrs *)entry;
1240                                /* Copy the mac address to delete */
1241                                mac_address = (u8 *)&mac_entry->macaddr;
1242                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1243
1244                                if (is_multicast_ether_addr(mac_info.macaddr))
1245                                        break;
1246                        }
1247
1248                        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1249                                        vpath_idx++) {
1250                                mac_info.vpath_no = vpath_idx;
1251                                status = vxge_del_mac_addr(vdev, &mac_info);
1252                        }
1253                }
1254
1255                /* Enable all multicast */
1256                for (i = 0; i < vdev->no_of_vpath; i++) {
1257                        vpath = &vdev->vpaths[i];
1258                        vxge_assert(vpath->is_open);
1259
1260                        status = vxge_hw_vpath_mcast_enable(vpath->handle);
1261                        if (status != VXGE_HW_OK) {
1262                                vxge_debug_init(VXGE_ERR,
1263                                        "%s:%d Enabling all multicasts failed",
1264                                         __func__, __LINE__);
1265                        }
1266                        vdev->all_multi_flg = 1;
1267                }
1268                dev->flags |= IFF_ALLMULTI;
1269        }
1270
1271        vxge_debug_entryexit(VXGE_TRACE,
1272                "%s:%d  Exiting...", __func__, __LINE__);
1273}
1274
1275/**
1276 * vxge_set_mac_addr
1277 * @dev: pointer to the device structure
1278 * @p: socket info
1279 *
1280 * Update entry "0" (default MAC addr)
1281 */
1282static int vxge_set_mac_addr(struct net_device *dev, void *p)
1283{
1284        struct sockaddr *addr = p;
1285        struct vxgedev *vdev;
1286        enum vxge_hw_status status = VXGE_HW_OK;
1287        struct macInfo mac_info_new, mac_info_old;
1288        int vpath_idx = 0;
1289
1290        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1291
1292        vdev = netdev_priv(dev);
1293
1294        if (!is_valid_ether_addr(addr->sa_data))
1295                return -EINVAL;
1296
1297        memset(&mac_info_new, 0, sizeof(struct macInfo));
1298        memset(&mac_info_old, 0, sizeof(struct macInfo));
1299
1300        vxge_debug_entryexit(VXGE_TRACE, "%s:%d  Exiting...",
1301                __func__, __LINE__);
1302
1303        /* Get the old address */
1304        memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1305
1306        /* Copy the new address */
1307        memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1308
 1309        /* First delete the old mac address from all the vpaths,
 1310        as we can't specify the index while adding a new mac address */
1311        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1312                struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1313                if (!vpath->is_open) {
 1314                        /* This can happen when this interface is added to or
 1315                        removed from a bonding interface. Delete this station
 1316                        address from the linked list */
1317                        vxge_mac_list_del(vpath, &mac_info_old);
1318
1319                        /* Add this new address to the linked list
1320                        for later restoring */
1321                        vxge_mac_list_add(vpath, &mac_info_new);
1322
1323                        continue;
1324                }
1325                /* Delete the station address */
1326                mac_info_old.vpath_no = vpath_idx;
1327                status = vxge_del_mac_addr(vdev, &mac_info_old);
1328        }
1329
1330        if (unlikely(!is_vxge_card_up(vdev))) {
1331                memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1332                return VXGE_HW_OK;
1333        }
1334
1335        /* Set this mac address to all the vpaths */
1336        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1337                mac_info_new.vpath_no = vpath_idx;
1338                mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1339                status = vxge_add_mac_addr(vdev, &mac_info_new);
1340                if (status != VXGE_HW_OK)
1341                        return -EINVAL;
1342        }
1343
1344        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1345
1346        return status;
1347}
1348
1349/*
1350 * vxge_vpath_intr_enable
1351 * @vdev: pointer to vdev
1352 * @vp_id: vpath for which to enable the interrupts
1353 *
1354 * Enables the interrupts for the vpath
1355*/
1356static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1357{
1358        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1359        int msix_id = 0;
1360        int tim_msix_id[4] = {0, 1, 0, 0};
1361        int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1362
1363        vxge_hw_vpath_intr_enable(vpath->handle);
1364
1365        if (vdev->config.intr_type == INTA)
1366                vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1367        else {
1368                vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1369                        alarm_msix_id);
1370
1371                msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1372                vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1373                vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1374
1375                /* enable the alarm vector */
1376                msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1377                        VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1378                vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1379        }
1380}
1381
1382/*
1383 * vxge_vpath_intr_disable
1384 * @vdev: pointer to vdev
1385 * @vp_id: vpath for which to disable the interrupts
1386 *
1387 * Disables the interrupts for the vpath
1388 */
1389static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1390{
1391        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1392        struct __vxge_hw_device *hldev;
1393        int msix_id;
1394
1395        hldev = pci_get_drvdata(vdev->pdev);
1396
1397        vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1398
1399        vxge_hw_vpath_intr_disable(vpath->handle);
1400
1401        if (vdev->config.intr_type == INTA)
1402                vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1403        else {
1404                msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1405                vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1406                vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1407
1408                /* disable the alarm vector */
1409                msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1410                        VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1411                vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1412        }
1413}
1414
1415/* Search the DA table for the given MAC address */
1416static enum vxge_hw_status
1417vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1418{
1419        enum vxge_hw_status status = VXGE_HW_OK;
1420        unsigned char macmask[ETH_ALEN];
1421        unsigned char macaddr[ETH_ALEN];
1422
1423        status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1424                                macaddr, macmask);
1425        if (status != VXGE_HW_OK) {
1426                vxge_debug_init(VXGE_ERR,
1427                        "DA config list entry failed for vpath:%d",
1428                        vpath->device_id);
1429                return status;
1430        }
1431
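            /* Walk the rest of the DA table until the address is found */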
1432        while (!ether_addr_equal(mac->macaddr, macaddr)) {
1433                status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1434                                macaddr, macmask);
1435                if (status != VXGE_HW_OK)
1436                        break;
1437        }
1438
1439        return status;
1440}
1441
1442/* Store all mac addresses from the list to the DA table */
1443static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1444{
1445        enum vxge_hw_status status = VXGE_HW_OK;
1446        struct macInfo mac_info;
1447        u8 *mac_address = NULL;
1448        struct list_head *entry, *next;
1449
1450        memset(&mac_info, 0, sizeof(struct macInfo));
1451
1452        if (vpath->is_open) {
1453                list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1454                        mac_address =
1455                                (u8 *)&
1456                                ((struct vxge_mac_addrs *)entry)->macaddr;
1457                        memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1458                        ((struct vxge_mac_addrs *)entry)->state =
1459                                VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1460                        /* does this mac address already exist in da table? */
1461                        status = vxge_search_mac_addr_in_da_table(vpath,
1462                                &mac_info);
1463                        if (status != VXGE_HW_OK) {
1464                                /* Add this mac address to the DA table */
1465                                status = vxge_hw_vpath_mac_addr_add(
1466                                        vpath->handle, mac_info.macaddr,
1467                                        mac_info.macmask,
1468                                    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1469                                if (status != VXGE_HW_OK) {
1470                                        vxge_debug_init(VXGE_ERR,
1471                                            "DA add entry failed for vpath:%d",
1472                                            vpath->device_id);
1473                                        ((struct vxge_mac_addrs *)entry)->state
1474                                                = VXGE_LL_MAC_ADDR_IN_LIST;
1475                                }
1476                        }
1477                }
1478        }
1479
1480        return status;
1481}
1482
1483/* Store all vlan ids from the list to the vid table */
1484static enum vxge_hw_status
1485vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1486{
1487        enum vxge_hw_status status = VXGE_HW_OK;
1488        struct vxgedev *vdev = vpath->vdev;
1489        u16 vid;
1490
1491        if (!vpath->is_open)
1492                return status;
1493
1494        for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1495                status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1496
1497        return status;
1498}
1499
1500/*
1501 * vxge_reset_vpath
1502 * @vdev: pointer to vdev
1503 * @vp_id: vpath to reset
1504 *
1505 * Resets the vpath
1506 */
1507static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1508{
1509        enum vxge_hw_status status = VXGE_HW_OK;
1510        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1511        int ret = 0;
1512
1513        /* check if device is down already */
1514        if (unlikely(!is_vxge_card_up(vdev)))
1515                return 0;
1516
1517        /* is device reset already scheduled */
1518        if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1519                return 0;
1520
1521        if (vpath->handle) {
1522                if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1523                        if (is_vxge_card_up(vdev) &&
1524                                vxge_hw_vpath_recover_from_reset(vpath->handle)
1525                                        != VXGE_HW_OK) {
1526                                vxge_debug_init(VXGE_ERR,
1527                                        "vxge_hw_vpath_recover_from_reset "
1528                                        "failed for vpath:%d", vp_id);
1529                                return status;
1530                        }
1531                } else {
1532                        vxge_debug_init(VXGE_ERR,
1533                                "vxge_hw_vpath_reset failed for "
1534                                "vpath:%d", vp_id);
1535                        return status;
1536                }
1537        } else
1538                return VXGE_HW_FAIL;
1539
1540        vxge_restore_vpath_mac_addr(vpath);
1541        vxge_restore_vpath_vid_table(vpath);
1542
1543        /* Enable all broadcast */
1544        vxge_hw_vpath_bcast_enable(vpath->handle);
1545
1546        /* Enable all multicast */
1547        if (vdev->all_multi_flg) {
1548                status = vxge_hw_vpath_mcast_enable(vpath->handle);
1549                if (status != VXGE_HW_OK)
1550                        vxge_debug_init(VXGE_ERR,
1551                                "%s:%d Enabling multicast failed",
1552                                __func__, __LINE__);
1553        }
1554
1555        /* Enable the interrupts */
1556        vxge_vpath_intr_enable(vdev, vp_id);
1557
1558        smp_wmb();
1559
1560        /* Enable the flow of traffic through the vpath */
1561        vxge_hw_vpath_enable(vpath->handle);
1562
1563        smp_wmb();
1564        vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1565        vpath->ring.last_status = VXGE_HW_OK;
1566
1567        /* Vpath reset done */
1568        clear_bit(vp_id, &vdev->vp_reset);
1569
1570        /* Start the vpath queue */
1571        if (netif_tx_queue_stopped(vpath->fifo.txq))
1572                netif_tx_wake_queue(vpath->fifo.txq);
1573
1574        return ret;
1575}
1576
1577/* Configure CI */
1578static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1579{
1580        int i = 0;
1581
1582        /* Enable CI for RTI */
1583        if (vdev->config.intr_type == MSI_X) {
1584                for (i = 0; i < vdev->no_of_vpath; i++) {
1585                        struct __vxge_hw_ring *hw_ring;
1586
1587                        hw_ring = vdev->vpaths[i].ring.handle;
1588                        vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1589                }
1590        }
1591
1592        /* Enable CI for TTI */
1593        for (i = 0; i < vdev->no_of_vpath; i++) {
1594                struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1595                vxge_hw_vpath_tti_ci_set(hw_fifo);
1596                /*
1597                 * For INTA (with or without NAPI), set CI on only one
1598                 * vpath (there is only one free-running timer).
1599                 */
1600                if ((vdev->config.intr_type == INTA) && (i == 0))
1601                        break;
1602        }
1605}
1606
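    /*
     * do_vxge_reset
     * @vdev: pointer to vdev
     * @event: VXGE_LL_FULL_RESET, VXGE_LL_START_RESET or VXGE_LL_COMPL_RESET
     *
     * Resets the adapter; the amount of work done depends on the event
     * (a full reset, or only the start or completion phase of one).
     */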
1607static int do_vxge_reset(struct vxgedev *vdev, int event)
1608{
1609        int ret = 0, vp_id, i;
1610
1611        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1612
1613        if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1614                /* check if device is down already */
1615                if (unlikely(!is_vxge_card_up(vdev)))
1616                        return 0;
1617
1618                /* is reset already scheduled */
1619                if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1620                        return 0;
1621        }
1622
1623        if (event == VXGE_LL_FULL_RESET) {
1624                netif_carrier_off(vdev->ndev);
1625
1626                /* wait for all the vpath reset to complete */
1627                for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1628                        while (test_bit(vp_id, &vdev->vp_reset))
1629                                msleep(50);
1630                }
1631
1632                netif_carrier_on(vdev->ndev);
1633
1634                /* if execution mode is set to debug, don't reset the adapter */
1635                if (unlikely(vdev->exec_mode)) {
1636                        vxge_debug_init(VXGE_ERR,
1637                                "%s: execution mode is debug, returning..",
1638                                vdev->ndev->name);
1639                        clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1640                        netif_tx_stop_all_queues(vdev->ndev);
1641                        return 0;
1642                }
1643        }
1644
1645        if (event == VXGE_LL_FULL_RESET) {
1646                vxge_hw_device_wait_receive_idle(vdev->devh);
1647                vxge_hw_device_intr_disable(vdev->devh);
1648
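                    /* Decide whether the reset may proceed based on the last
                     * critical error event; unrecoverable errors leave the
                     * device disabled.
                     */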
1649                switch (vdev->cric_err_event) {
1650                case VXGE_HW_EVENT_UNKNOWN:
1651                        netif_tx_stop_all_queues(vdev->ndev);
1652                        vxge_debug_init(VXGE_ERR,
1653                                "fatal: %s: Disabling device due to "
1654                                "unknown error",
1655                                vdev->ndev->name);
1656                        ret = -EPERM;
1657                        goto out;
1658                case VXGE_HW_EVENT_RESET_START:
1659                        break;
1660                case VXGE_HW_EVENT_RESET_COMPLETE:
1661                case VXGE_HW_EVENT_LINK_DOWN:
1662                case VXGE_HW_EVENT_LINK_UP:
1663                case VXGE_HW_EVENT_ALARM_CLEARED:
1664                case VXGE_HW_EVENT_ECCERR:
1665                case VXGE_HW_EVENT_MRPCIM_ECCERR:
1666                        ret = -EPERM;
1667                        goto out;
1668                case VXGE_HW_EVENT_FIFO_ERR:
1669                case VXGE_HW_EVENT_VPATH_ERR:
1670                        break;
1671                case VXGE_HW_EVENT_CRITICAL_ERR:
1672                        netif_tx_stop_all_queues(vdev->ndev);
1673                        vxge_debug_init(VXGE_ERR,
1674                                "fatal: %s: Disabling device due to "
1675                                "serious error",
1676                                vdev->ndev->name);
1677                        /* SOP or device reset required */
1678                        /* This event is not currently used */
1679                        ret = -EPERM;
1680                        goto out;
1681                case VXGE_HW_EVENT_SERR:
1682                        netif_tx_stop_all_queues(vdev->ndev);
1683                        vxge_debug_init(VXGE_ERR,
1684                                "fatal: %s: Disabling device due to "
1685                                "serious error",
1686                                vdev->ndev->name);
1687                        ret = -EPERM;
1688                        goto out;
1689                case VXGE_HW_EVENT_SRPCIM_SERR:
1690                case VXGE_HW_EVENT_MRPCIM_SERR:
1691                        ret = -EPERM;
1692                        goto out;
1693                case VXGE_HW_EVENT_SLOT_FREEZE:
1694                        netif_tx_stop_all_queues(vdev->ndev);
1695                        vxge_debug_init(VXGE_ERR,
1696                                "fatal: %s: Disabling device due to "
1697                                "slot freeze",
1698                                vdev->ndev->name);
1699                        ret = -EPERM;
1700                        goto out;
1701                default:
1702                        break;
1703
1704                }
1705        }
1706
1707        if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1708                netif_tx_stop_all_queues(vdev->ndev);
1709
1710        if (event == VXGE_LL_FULL_RESET) {
1711                vxge_reset_all_vpaths(vdev);
1712        }
1713
1714        if (event == VXGE_LL_COMPL_RESET) {
1715                for (i = 0; i < vdev->no_of_vpath; i++)
1716                        if (vdev->vpaths[i].handle) {
1717                                if (vxge_hw_vpath_recover_from_reset(
1718                                        vdev->vpaths[i].handle)
1719                                                != VXGE_HW_OK) {
1720                                        vxge_debug_init(VXGE_ERR,
1721                                                "vxge_hw_vpath_recover_"
1722                                                "from_reset failed for vpath: "
1723                                                "%d", i);
1724                                        ret = -EPERM;
1725                                        goto out;
1726                                }
1727                        } else {
1728                                vxge_debug_init(VXGE_ERR,
1729                                        "vxge_hw_vpath_reset failed for "
1730                                        "vpath:%d", i);
1731                                ret = -EPERM;
1732                                goto out;
1733                        }
1734        }
1735
1736        if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1737                /* Reprogram the DA table with populated mac addresses */
1738                for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1739                        vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1740                        vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1741                }
1742
1743                /* enable vpath interrupts */
1744                for (i = 0; i < vdev->no_of_vpath; i++)
1745                        vxge_vpath_intr_enable(vdev, i);
1746
1747                vxge_hw_device_intr_enable(vdev->devh);
1748
1749                smp_wmb();
1750
1751                /* Indicate card up */
1752                set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1753
1754                /* Get the traffic to flow through the vpaths */
1755                for (i = 0; i < vdev->no_of_vpath; i++) {
1756                        vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1757                        smp_wmb();
1758                        vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1759                }
1760
1761                netif_tx_wake_all_queues(vdev->ndev);
1762        }
1763
1764        /* configure CI */
1765        vxge_config_ci_for_tti_rti(vdev);
1766
1767out:
1768        vxge_debug_entryexit(VXGE_TRACE,
1769                "%s:%d  Exiting...", __func__, __LINE__);
1770
1771        /* Indicate reset done */
1772        if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1773                clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1774        return ret;
1775}
1776
1777/*
1778 * vxge_reset
1779 * @vdev: pointer to ll device
1780 *
1781 * driver may reset the chip on events of serr, eccerr, etc
1782 */
1783static void vxge_reset(struct work_struct *work)
1784{
1785        struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1786
1787        if (!netif_running(vdev->ndev))
1788                return;
1789
1790        do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1791}
1792
1793/**
1794 * vxge_poll_msix - Receive handler when Receive Polling is used.
1795 * @napi: pointer to the napi structure.
1796 * @budget: Number of packets budgeted to be processed in this iteration.
1797 *
1798 * This function is used only when the receive side is handled through
1799 * polling (known as NAPI in Linux). It does what the normal Rx interrupt
1800 * handler does in terms of descriptor and packet processing, but not in
1801 * interrupt context, and it processes at most a specified number of
1802 * packets in one iteration. That limit is passed down by the kernel as
1803 * the function argument 'budget'.
1804 */
1805static int vxge_poll_msix(struct napi_struct *napi, int budget)
1806{
1807        struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1808        int pkts_processed;
1809        int budget_org = budget;
1810
1811        ring->budget = budget;
1812        ring->pkts_processed = 0;
1813        vxge_hw_vpath_poll_rx(ring->handle);
1814        pkts_processed = ring->pkts_processed;
1815
1816        if (pkts_processed < budget_org) {
1817                napi_complete_done(napi, pkts_processed);
1818
1819                /* Re enable the Rx interrupts for the vpath */
1820                vxge_hw_channel_msix_unmask(
1821                                (struct __vxge_hw_channel *)ring->handle,
1822                                ring->rx_vector_no);
1823        }
1824
1825        /* Return the local copy: if the interrupt fires right after the
1826         * MSI-X vector is unmasked above, the handler can preempt this
1827         * NAPI thread and change ring->pkts_processed */
1828        return pkts_processed;
1829}
1830
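    /* NAPI poll routine used in INTA mode: services the Rx rings of all
     * vpaths from a single NAPI context and completes any pending Tx.
     */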
1831static int vxge_poll_inta(struct napi_struct *napi, int budget)
1832{
1833        struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1834        int pkts_processed = 0;
1835        int i;
1836        int budget_org = budget;
1837        struct vxge_ring *ring;
1838
1839        struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1840
1841        for (i = 0; i < vdev->no_of_vpath; i++) {
1842                ring = &vdev->vpaths[i].ring;
1843                ring->budget = budget;
1844                ring->pkts_processed = 0;
1845                vxge_hw_vpath_poll_rx(ring->handle);
1846                pkts_processed += ring->pkts_processed;
1847                budget -= ring->pkts_processed;
1848                if (budget <= 0)
1849                        break;
1850        }
1851
1852        VXGE_COMPLETE_ALL_TX(vdev);
1853
1854        if (pkts_processed < budget_org) {
1855                napi_complete_done(napi, pkts_processed);
1856                /* Re enable the Rx interrupts for the ring */
1857                vxge_hw_device_unmask_all(hldev);
1858                vxge_hw_device_flush_io(hldev);
1859        }
1860
1861        return pkts_processed;
1862}
1863
1864#ifdef CONFIG_NET_POLL_CONTROLLER
1865/**
1866 * vxge_netpoll - netpoll event handler entry point
1867 * @dev : pointer to the device structure.
1868 * Description:
1869 *      This function will be called by upper layer to check for events on the
1870 * interface in situations where interrupts are disabled. It is used for
1871 * specific in-kernel networking tasks, such as remote consoles and kernel
1872 * debugging over the network (example netdump in RedHat).
1873 */
1874static void vxge_netpoll(struct net_device *dev)
1875{
1876        struct vxgedev *vdev = netdev_priv(dev);
1877        struct pci_dev *pdev = vdev->pdev;
1878        struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
1879        const int irq = pdev->irq;
1880
1881        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1882
1883        if (pci_channel_offline(pdev))
1884                return;
1885
1886        disable_irq(irq);
1887        vxge_hw_device_clear_tx_rx(hldev);
1888
1890        VXGE_COMPLETE_ALL_RX(vdev);
1891        VXGE_COMPLETE_ALL_TX(vdev);
1892
1893        enable_irq(irq);
1894
1895        vxge_debug_entryexit(VXGE_TRACE,
1896                "%s:%d  Exiting...", __func__, __LINE__);
1897}
1898#endif
1899
1900/* RTH configuration */
1901static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1902{
1903        enum vxge_hw_status status = VXGE_HW_OK;
1904        struct vxge_hw_rth_hash_types hash_types;
1905        u8 itable[256] = {0}; /* indirection table */
1906        u8 mtable[256] = {0}; /* CPU to vpath mapping  */
1907        int index;
1908
1909        /*
1910         * Filling
1911         *      - itable with bucket numbers
1912         *      - mtable with bucket-to-vpath mapping
1913         */
1914        for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1915                itable[index] = index;
1916                mtable[index] = index % vdev->no_of_vpath;
1917        }
1918
1919        /* set indirection table, bucket-to-vpath mapping */
1920        status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1921                                                vdev->no_of_vpath,
1922                                                mtable, itable,
1923                                                vdev->config.rth_bkt_sz);
1924        if (status != VXGE_HW_OK) {
1925                vxge_debug_init(VXGE_ERR,
1926                        "RTH indirection table configuration failed "
1927                        "for vpath:%d", vdev->vpaths[0].device_id);
1928                return status;
1929        }
1930
1931        /* Fill RTH hash types */
1932        hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
1933        hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
1934        hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
1935        hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
1936        hash_types.hash_type_tcpipv6ex_en =
1937                                        vdev->config.rth_hash_type_tcpipv6ex;
1938        hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
1939
1940        /*
1941         * Because the itable_set() method uses the active_table field
1942         * for the target virtual path the RTH config should be updated
1943         * for all VPATHs. The h/w only uses the lowest numbered VPATH
1944         * when steering frames.
1945         */
1946        for (index = 0; index < vdev->no_of_vpath; index++) {
1947                status = vxge_hw_vpath_rts_rth_set(
1948                                vdev->vpaths[index].handle,
1949                                vdev->config.rth_algorithm,
1950                                &hash_types,
1951                                vdev->config.rth_bkt_sz);
1952                if (status != VXGE_HW_OK) {
1953                        vxge_debug_init(VXGE_ERR,
1954                                "RTH configuration failed for vpath:%d",
1955                                vdev->vpaths[index].device_id);
1956                        return status;
1957                }
1958        }
1959
1960        return status;
1961}
1962
1963/* reset vpaths */
1964static void vxge_reset_all_vpaths(struct vxgedev *vdev)
1965{
1966        struct vxge_vpath *vpath;
1967        int i;
1968
1969        for (i = 0; i < vdev->no_of_vpath; i++) {
1970                vpath = &vdev->vpaths[i];
1971                if (vpath->handle) {
1972                        if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1973                                if (is_vxge_card_up(vdev) &&
1974                                        vxge_hw_vpath_recover_from_reset(
1975                                                vpath->handle) != VXGE_HW_OK) {
1976                                        vxge_debug_init(VXGE_ERR,
1977                                                "vxge_hw_vpath_recover_"
1978                                                "from_reset failed for vpath: "
1979                                                "%d", i);
1980                                        return;
1981                                }
1982                        } else {
1983                                vxge_debug_init(VXGE_ERR,
1984                                        "vxge_hw_vpath_reset failed for "
1985                                        "vpath:%d", i);
1986                                return;
1987                        }
1988                }
1989        }
1990}
1991
1992/* close vpaths */
1993static void vxge_close_vpaths(struct vxgedev *vdev, int index)
1994{
1995        struct vxge_vpath *vpath;
1996        int i;
1997
1998        for (i = index; i < vdev->no_of_vpath; i++) {
1999                vpath = &vdev->vpaths[i];
2000
2001                if (vpath->handle && vpath->is_open) {
2002                        vxge_hw_vpath_close(vpath->handle);
2003                        vdev->stats.vpaths_open--;
2004                }
2005                vpath->is_open = 0;
2006                vpath->handle = NULL;
2007        }
2008}
2009
2010/* open vpaths */
2011static int vxge_open_vpaths(struct vxgedev *vdev)
2012{
2013        struct vxge_hw_vpath_attr attr;
2014        enum vxge_hw_status status;
2015        struct vxge_vpath *vpath;
2016        u32 vp_id = 0;
2017        int i;
2018
2019        for (i = 0; i < vdev->no_of_vpath; i++) {
2020                vpath = &vdev->vpaths[i];
2021                vxge_assert(vpath->is_configured);
2022
2023                if (!vdev->titan1) {
2024                        struct vxge_hw_vp_config *vcfg;
2025                        vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2026
2027                        vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2028                        vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2029                        vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2030                        vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2031                        vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2032                        vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2033                        vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2034                        vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2035                        vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2036                }
2037
2038                attr.vp_id = vpath->device_id;
2039                attr.fifo_attr.callback = vxge_xmit_compl;
2040                attr.fifo_attr.txdl_term = vxge_tx_term;
2041                attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2042                attr.fifo_attr.userdata = &vpath->fifo;
2043
2044                attr.ring_attr.callback = vxge_rx_1b_compl;
2045                attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2046                attr.ring_attr.rxd_term = vxge_rx_term;
2047                attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2048                attr.ring_attr.userdata = &vpath->ring;
2049
2050                vpath->ring.ndev = vdev->ndev;
2051                vpath->ring.pdev = vdev->pdev;
2052
2053                status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2054                if (status == VXGE_HW_OK) {
2055                        vpath->fifo.handle =
2056                            (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2057                        vpath->ring.handle =
2058                            (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2059                        vpath->fifo.tx_steering_type =
2060                                vdev->config.tx_steering_type;
2061                        vpath->fifo.ndev = vdev->ndev;
2062                        vpath->fifo.pdev = vdev->pdev;
2063
2064                        u64_stats_init(&vpath->fifo.stats.syncp);
2065                        u64_stats_init(&vpath->ring.stats.syncp);
2066
2067                        if (vdev->config.tx_steering_type)
2068                                vpath->fifo.txq =
2069                                        netdev_get_tx_queue(vdev->ndev, i);
2070                        else
2071                                vpath->fifo.txq =
2072                                        netdev_get_tx_queue(vdev->ndev, 0);
2073                        vpath->fifo.indicate_max_pkts =
2074                                vdev->config.fifo_indicate_max_pkts;
2075                        vpath->fifo.tx_vector_no = 0;
2076                        vpath->ring.rx_vector_no = 0;
2077                        vpath->ring.rx_hwts = vdev->rx_hwts;
2078                        vpath->is_open = 1;
2079                        vdev->vp_handles[i] = vpath->handle;
2080                        vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2081                        vdev->stats.vpaths_open++;
2082                } else {
2083                        vdev->stats.vpath_open_fail++;
2084                        vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2085                                        "open with status: %d",
2086                                        vdev->ndev->name, vpath->device_id,
2087                                        status);
2088                        vxge_close_vpaths(vdev, 0);
2089                        return -EPERM;
2090                }
2091
2092                vp_id = vpath->handle->vpath->vp_id;
2093                vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2094        }
2095
2096        return VXGE_HW_OK;
2097}
2098
2099/**
2100 *  adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2101 *  if the interrupts are not within a range
2102 *  @fifo: pointer to transmit fifo structure
2103 *  Description: The function changes the boundary timer and restriction
2104 *  timer values depending on the traffic
2105 *  Return Value: None
2106 */
2107static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2108{
2109        fifo->interrupt_count++;
2110        if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
2111                struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2112
2113                fifo->jiffies = jiffies;
2114                if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2115                    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2116                        hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2117                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2118                } else if (hw_fifo->rtimer != 0) {
2119                        hw_fifo->rtimer = 0;
2120                        vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2121                }
2122                fifo->interrupt_count = 0;
2123        }
2124}
2125
2126/**
2127 *  adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2128 *  if the interrupts are not within a range
2129 *  @ring: pointer to receive ring structure
2130 *  Description: The function increases or decreases the packet counts within
2131 *  the ranges of traffic utilization, if the interrupts due to this ring are
2132 *  not within a fixed range.
2133 *  Return Value: Nothing
2134 */
2135static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2136{
2137        ring->interrupt_count++;
2138        if (time_before(ring->jiffies + HZ / 100, jiffies)) {
2139                struct __vxge_hw_ring *hw_ring = ring->handle;
2140
2141                ring->jiffies = jiffies;
2142                if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2143                    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2144                        hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2145                        vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2146                } else if (hw_ring->rtimer != 0) {
2147                        hw_ring->rtimer = 0;
2148                        vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2149                }
2150                ring->interrupt_count = 0;
2151        }
2152}
2153
2154/*
2155 *  vxge_isr_napi
2156 *  @irq: the irq of the device.
2157 *  @dev_id: a void pointer to the vxgedev structure of the Titan device
2159 *
2160 *  This function is the ISR handler of the device when napi is enabled. It
2161 *  identifies the reason for the interrupt and calls the relevant service
2162 *  routines.
2163 */
2164static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2165{
2166        struct __vxge_hw_device *hldev;
2167        u64 reason;
2168        enum vxge_hw_status status;
2169        struct vxgedev *vdev = (struct vxgedev *)dev_id;
2170
2171        vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2172
2173        hldev = pci_get_drvdata(vdev->pdev);
2174
2175        if (pci_channel_offline(vdev->pdev))
2176                return IRQ_NONE;
2177
2178        if (unlikely(!is_vxge_card_up(vdev)))
2179                return IRQ_HANDLED;
2180
2181        status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2182        if (status == VXGE_HW_OK) {
2183                vxge_hw_device_mask_all(hldev);
2184
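                    /* Schedule NAPI only if a deployed vpath raised a traffic
                     * interrupt; otherwise just re-enable the interrupts.
                     */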
2185                if (reason &
2186                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2187                        vdev->vpaths_deployed >>
2188                        (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2189
2190                        vxge_hw_device_clear_tx_rx(hldev);
2191                        napi_schedule(&vdev->napi);
2192                        vxge_debug_intr(VXGE_TRACE,
2193                                "%s:%d  Exiting...", __func__, __LINE__);
2194                        return IRQ_HANDLED;
2195                } else
2196                        vxge_hw_device_unmask_all(hldev);
2197        } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2198                (status == VXGE_HW_ERR_CRITICAL) ||
2199                (status == VXGE_HW_ERR_FIFO))) {
2200                vxge_hw_device_mask_all(hldev);
2201                vxge_hw_device_flush_io(hldev);
2202                return IRQ_HANDLED;
2203        } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2204                return IRQ_HANDLED;
2205
2206        vxge_debug_intr(VXGE_TRACE, "%s:%d  Exiting...", __func__, __LINE__);
2207        return IRQ_NONE;
2208}
2209
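    /* Per-vpath Tx MSI-X handler: mask and clear the Tx vector, complete the
     * transmitted descriptors and unmask the vector again.
     */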
2210static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2211{
2212        struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2213
2214        adaptive_coalesce_tx_interrupts(fifo);
2215
2216        vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2217                                  fifo->tx_vector_no);
2218
2219        vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2220                                   fifo->tx_vector_no);
2221
2222        VXGE_COMPLETE_VPATH_TX(fifo);
2223
2224        vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2225                                    fifo->tx_vector_no);
2226
2227        return IRQ_HANDLED;
2228}
2229
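    /* Per-vpath Rx MSI-X handler: mask and clear the Rx vector and defer the
     * packet processing to NAPI.
     */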
2230static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2231{
2232        struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2233
2234        adaptive_coalesce_rx_interrupts(ring);
2235
2236        vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2237                                  ring->rx_vector_no);
2238
2239        vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2240                                   ring->rx_vector_no);
2241
2242        napi_schedule(&ring->napi);
2243        return IRQ_HANDLED;
2244}
2245
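    /* Alarm MSI-X handler: process alarm events on every vpath */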
2246static irqreturn_t
2247vxge_alarm_msix_handle(int irq, void *dev_id)
2248{
2249        int i;
2250        enum vxge_hw_status status;
2251        struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2252        struct vxgedev *vdev = vpath->vdev;
2253        int msix_id = (vpath->handle->vpath->vp_id *
2254                VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2255
2256        for (i = 0; i < vdev->no_of_vpath; i++) {
2257                /* Reduce the chance of losing alarm interrupts by masking
2258                 * the vector. A pending bit will be set if an alarm is
2259                 * generated and on unmask the interrupt will be fired.
2260                 */
2261                vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2262                vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2263
2264                status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2265                        vdev->exec_mode);
2266                if (status == VXGE_HW_OK) {
2267                        vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2268                                                  msix_id);
2269                        continue;
2270                }
2271                vxge_debug_intr(VXGE_ERR,
2272                        "%s: vxge_hw_vpath_alarm_process failed %x ",
2273                        VXGE_DRIVER_NAME, status);
2274        }
2275        return IRQ_HANDLED;
2276}
2277
2278static int vxge_alloc_msix(struct vxgedev *vdev)
2279{
2280        int j, i, ret = 0;
2281        int msix_intr_vect = 0, temp;
2282        vdev->intr_cnt = 0;
2283
2284start:
2285        /* Tx/Rx MSIX Vectors count */
2286        vdev->intr_cnt = vdev->no_of_vpath * 2;
2287
2288        /* Alarm MSIX Vectors count */
2289        vdev->intr_cnt++;
2290
2291        vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2292                                GFP_KERNEL);
2293        if (!vdev->entries) {
2294                vxge_debug_init(VXGE_ERR,
2295                        "%s: memory allocation failed",
2296                        VXGE_DRIVER_NAME);
2297                ret = -ENOMEM;
2298                goto alloc_entries_failed;
2299        }
2300
2301        vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2302                                     sizeof(struct vxge_msix_entry),
2303                                     GFP_KERNEL);
2304        if (!vdev->vxge_entries) {
2305                vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2306                        VXGE_DRIVER_NAME);
2307                ret = -ENOMEM;
2308                goto alloc_vxge_entries_failed;
2309        }
2310
2311        for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2312
2313                msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2314
2315                /* Initialize the fifo vector */
2316                vdev->entries[j].entry = msix_intr_vect;
2317                vdev->vxge_entries[j].entry = msix_intr_vect;
2318                vdev->vxge_entries[j].in_use = 0;
2319                j++;
2320
2321                /* Initialize the ring vector */
2322                vdev->entries[j].entry = msix_intr_vect + 1;
2323                vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2324                vdev->vxge_entries[j].in_use = 0;
2325                j++;
2326        }
2327
2328        /* Initialize the alarm vector */
2329        vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2330        vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2331        vdev->vxge_entries[j].in_use = 0;
2332
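            /* Request between 3 vectors (one Tx, one Rx, one alarm) and the
             * full count; if fewer than requested are granted, fall back to
             * fewer vpaths and retry.
             */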
2333        ret = pci_enable_msix_range(vdev->pdev,
2334                                    vdev->entries, 3, vdev->intr_cnt);
2335        if (ret < 0) {
2336                ret = -ENODEV;
2337                goto enable_msix_failed;
2338        } else if (ret < vdev->intr_cnt) {
2339                pci_disable_msix(vdev->pdev);
2340
2341                vxge_debug_init(VXGE_ERR,
2342                        "%s: MSI-X enable failed for %d vectors, ret: %d",
2343                        VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2344                if (max_config_vpath != VXGE_USE_DEFAULT) {
2345                        ret = -ENODEV;
2346                        goto enable_msix_failed;
2347                }
2348
2349                kfree(vdev->entries);
2350                kfree(vdev->vxge_entries);
2351                vdev->entries = NULL;
2352                vdev->vxge_entries = NULL;
2353                /* Retry with fewer vectors by reducing the number of vpaths */
2354                temp = (ret - 1) / 2;
2355                vxge_close_vpaths(vdev, temp);
2356                vdev->no_of_vpath = temp;
2357                goto start;
2358        }
2359        return 0;
2360
2361enable_msix_failed:
2362        kfree(vdev->vxge_entries);
2363alloc_vxge_entries_failed:
2364        kfree(vdev->entries);
2365alloc_entries_failed:
2366        return ret;
2367}
2368
2369static int vxge_enable_msix(struct vxgedev *vdev)
2370{
2372        int i, ret = 0;
2373        /* 0 - Tx, 1 - Rx  */
2374        int tim_msix_id[4] = {0, 1, 0, 0};
2375
2376        vdev->intr_cnt = 0;
2377
2378        /* allocate msix vectors */
2379        ret = vxge_alloc_msix(vdev);
2380        if (!ret) {
2381                for (i = 0; i < vdev->no_of_vpath; i++) {
2382                        struct vxge_vpath *vpath = &vdev->vpaths[i];
2383
2384                        /* If fifo or ring are not enabled, the MSIX vector for
2385                         * it should be set to 0.
2386                         */
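                            /* Each vpath owns a block of VXGE_HW_VPATH_MSIX_ACTIVE
                             * vector slots: Tx at offset 0, Rx at offset 1.
                             */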
2387                        vpath->ring.rx_vector_no = (vpath->device_id *
2388                                                VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2389
2390                        vpath->fifo.tx_vector_no = (vpath->device_id *
2391                                                VXGE_HW_VPATH_MSIX_ACTIVE);
2392
2393                        vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2394                                               VXGE_ALARM_MSIX_ID);
2395                }
2396        }
2397
2398        return ret;
2399}
2400
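    /* Free all requested MSI-X vectors and disable MSI-X */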
2401static void vxge_rem_msix_isr(struct vxgedev *vdev)
2402{
2403        int intr_cnt;
2404
2405        for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2406                intr_cnt++) {
2407                if (vdev->vxge_entries[intr_cnt].in_use) {
2408                        synchronize_irq(vdev->entries[intr_cnt].vector);
2409                        free_irq(vdev->entries[intr_cnt].vector,
2410                                vdev->vxge_entries[intr_cnt].arg);
2411                        vdev->vxge_entries[intr_cnt].in_use = 0;
2412                }
2413        }
2414
2415        kfree(vdev->entries);
2416        kfree(vdev->vxge_entries);
2417        vdev->entries = NULL;
2418        vdev->vxge_entries = NULL;
2419
2420        if (vdev->config.intr_type == MSI_X)
2421                pci_disable_msix(vdev->pdev);
2422}
2423
2424static void vxge_rem_isr(struct vxgedev *vdev)
2425{
2426        if (IS_ENABLED(CONFIG_PCI_MSI) &&
2427            vdev->config.intr_type == MSI_X) {
2428                vxge_rem_msix_isr(vdev);
2429        } else if (vdev->config.intr_type == INTA) {
2430                synchronize_irq(vdev->pdev->irq);
2431                free_irq(vdev->pdev->irq, vdev);
2432        }
2433}
2434
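    /* Register the interrupt handlers: per-vpath Tx/Rx MSI-X vectors plus one
     * alarm vector when MSI-X is available, otherwise a shared INTA line.
     */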
2435static int vxge_add_isr(struct vxgedev *vdev)
2436{
2437        int ret = 0;
2438        int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2439        int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2440
2441        if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
2442                ret = vxge_enable_msix(vdev);
2443
2444        if (ret) {
2445                vxge_debug_init(VXGE_ERR,
2446                        "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2447                vxge_debug_init(VXGE_ERR,
2448                        "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2449                vdev->config.intr_type = INTA;
2450        }
2451
2452        if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
2453                for (intr_idx = 0;
2454                     intr_idx < (vdev->no_of_vpath *
2455                        VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2456
2457                        msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2458                        irq_req = 0;
2459
2460                        switch (msix_idx) {
2461                        case 0:
2462                                snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2463                                        "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2464                                        vdev->ndev->name,
2465                                        vdev->entries[intr_cnt].entry,
2466                                        pci_fun, vp_idx);
2467                                ret = request_irq(
2468                                        vdev->entries[intr_cnt].vector,
2469                                        vxge_tx_msix_handle, 0,
2470                                        vdev->desc[intr_cnt],
2471                                        &vdev->vpaths[vp_idx].fifo);
2472                                vdev->vxge_entries[intr_cnt].arg =
2473                                                &vdev->vpaths[vp_idx].fifo;
2474                                irq_req = 1;
2475                                break;
2476                        case 1:
2477                                snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2478                                        "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2479                                        vdev->ndev->name,
2480                                        vdev->entries[intr_cnt].entry,
2481                                        pci_fun, vp_idx);
2482                                ret = request_irq(
2483                                        vdev->entries[intr_cnt].vector,
2484                                        vxge_rx_msix_napi_handle, 0,
2485                                        vdev->desc[intr_cnt],
2486                                        &vdev->vpaths[vp_idx].ring);
2487                                vdev->vxge_entries[intr_cnt].arg =
2488                                                &vdev->vpaths[vp_idx].ring;
2489                                irq_req = 1;
2490                                break;
2491                        }
2492
2493                        if (ret) {
2494                                vxge_debug_init(VXGE_ERR,
2495                                        "%s: MSIX - %d  Registration failed",
2496                                        vdev->ndev->name, intr_cnt);
2497                                vxge_rem_msix_isr(vdev);
2498                                vdev->config.intr_type = INTA;
2499                                vxge_debug_init(VXGE_ERR,
2500                                        "%s: Defaulting to INTA",
2501                                        vdev->ndev->name);
2502                                goto INTA_MODE;
2503                        }
2504
2505                        if (irq_req) {
2506                                /* We requested this MSI-X interrupt */
2507                                vdev->vxge_entries[intr_cnt].in_use = 1;
2508                                msix_idx +=  vdev->vpaths[vp_idx].device_id *
2509                                        VXGE_HW_VPATH_MSIX_ACTIVE;
2510                                vxge_hw_vpath_msix_unmask(
2511                                        vdev->vpaths[vp_idx].handle,
2512                                        msix_idx);
2513                                intr_cnt++;
2514                        }
2515
2516                        /* Point to next vpath handler */
2517                        if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2518                            (vp_idx < (vdev->no_of_vpath - 1)))
2519                                vp_idx++;
2520                }
2521
2522                intr_cnt = vdev->no_of_vpath * 2;
2523                snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2524                        "%s:vxge:MSI-X %d - Alarm - fn:%d",
2525                        vdev->ndev->name,
2526                        vdev->entries[intr_cnt].entry,
2527                        pci_fun);
2528                /* For Alarm interrupts */
2529                ret = request_irq(vdev->entries[intr_cnt].vector,
2530                                        vxge_alarm_msix_handle, 0,
2531                                        vdev->desc[intr_cnt],
2532                                        &vdev->vpaths[0]);
2533                if (ret) {
2534                        vxge_debug_init(VXGE_ERR,
2535                                "%s: MSIX - %d Registration failed",
2536                                vdev->ndev->name, intr_cnt);
2537                        vxge_rem_msix_isr(vdev);
2538                        vdev->config.intr_type = INTA;
2539                        vxge_debug_init(VXGE_ERR,
2540                                "%s: Defaulting to INTA",
2541                                vdev->ndev->name);
2542                        goto INTA_MODE;
2543                }
2544
2545                msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2546                        VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2547                vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2548                                        msix_idx);
2549                vdev->vxge_entries[intr_cnt].in_use = 1;
2550                vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2551        }
2552
2553INTA_MODE:
2554        if (vdev->config.intr_type == INTA) {
2555                snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2556                        "%s:vxge:INTA", vdev->ndev->name);
2557                vxge_hw_device_set_intr_type(vdev->devh,
2558                        VXGE_HW_INTR_MODE_IRQLINE);
2559
2560                vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2561
2562                ret = request_irq((int) vdev->pdev->irq,
2563                        vxge_isr_napi,
2564                        IRQF_SHARED, vdev->desc[0], vdev);
2565                if (ret) {
2566                        vxge_debug_init(VXGE_ERR,
2567                                "%s %s-%d: ISR registration failed",
2568                                VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2569                        return -ENODEV;
2570                }
2571                vxge_debug_init(VXGE_TRACE,
2572                        "new %s-%d line allocated",
2573                        "IRQ", vdev->pdev->irq);
2574        }
2575
2576        return VXGE_HW_OK;
2577}
2578
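    /* Timer handler: reset any vpath with a reset pending in vdev->vp_reset
     * and re-arm the timer.
     */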
2579static void vxge_poll_vp_reset(struct timer_list *t)
2580{
2581        struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
2582        int i, j = 0;
2583
2584        for (i = 0; i < vdev->no_of_vpath; i++) {
2585                if (test_bit(i, &vdev->vp_reset)) {
2586                        vxge_reset_vpath(vdev, i);
2587                        j++;
2588                }
2589        }
2590        if (j && (vdev->config.intr_type != MSI_X)) {
2591                vxge_hw_device_unmask_all(vdev->devh);
2592                vxge_hw_device_flush_io(vdev->devh);
2593        }
2594
2595        mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2596}
2597
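    /* Timer handler: detect vpaths whose receive side appears stalled and
     * schedule a reset for them.
     */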
2598static void vxge_poll_vp_lockup(struct timer_list *t)
2599{
2600        struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
2601        enum vxge_hw_status status = VXGE_HW_OK;
2602        struct vxge_vpath *vpath;
2603        struct vxge_ring *ring;
2604        int i;
2605        unsigned long rx_frms;
2606
2607        for (i = 0; i < vdev->no_of_vpath; i++) {
2608                ring = &vdev->vpaths[i].ring;
2609
2610                /* Number of received frames, truncated to the machine word size */
2611                rx_frms = READ_ONCE(ring->stats.rx_frms);
2612
2613                /* Did this vpath receive any packets? */
2614                if (ring->stats.prev_rx_frms == rx_frms) {
2615                        status = vxge_hw_vpath_check_leak(ring->handle);
2616
2617                        /* Did it receive any packets last time? */
2618                        if ((VXGE_HW_FAIL == status) &&
2619                                (VXGE_HW_FAIL == ring->last_status)) {
2620
2621                                /* schedule vpath reset */
2622                                if (!test_and_set_bit(i, &vdev->vp_reset)) {
2623                                        vpath = &vdev->vpaths[i];
2624
2625                                        /* disable interrupts for this vpath */
2626                                        vxge_vpath_intr_disable(vdev, i);
2627
2628                                        /* stop the queue for this vpath */
2629                                        netif_tx_stop_queue(vpath->fifo.txq);
2630                                        continue;
2631                                }
2632                        }
2633                }
2634                ring->stats.prev_rx_frms = rx_frms;
2635                ring->last_status = status;
2636        }
2637
2638        /* Check every millisecond */
2639        mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2640}
2641
2642static netdev_features_t vxge_fix_features(struct net_device *dev,
2643        netdev_features_t features)
2644{
2645        netdev_features_t changed = dev->features ^ features;
2646
2647        /* Enabling RTH requires some of the logic in vxge_device_register and a
2648         * vpath reset.  Due to these restrictions, only allow modification
2649         * while the interface is down.
2650         */
2651        if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2652                features ^= NETIF_F_RXHASH;
2653
2654        return features;
2655}
2656
2657static int vxge_set_features(struct net_device *dev, netdev_features_t features)
2658{
2659        struct vxgedev *vdev = netdev_priv(dev);
2660        netdev_features_t changed = dev->features ^ features;
2661
2662        if (!(changed & NETIF_F_RXHASH))
2663                return 0;
2664
2665        /* !netif_running() ensured by vxge_fix_features() */
2666
2667        vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2668        vxge_reset_all_vpaths(vdev);
2669
2670        return 0;
2671}
2672
2673/**
2674 * vxge_open
2675 * @dev: pointer to the device structure.
2676 *
2677 * This function is the open entry point of the driver. It mainly calls a
2678 * function to allocate Rx buffers and inserts them into the buffer
2679 * descriptors and then enables the Rx part of the NIC.
2680 * Return value: '0' on success and an appropriate (-)ve integer as
2681 * defined in errno.h file on failure.
2682 */
2683static int vxge_open(struct net_device *dev)
2684{
2685        enum vxge_hw_status status;
2686        struct vxgedev *vdev;
2687        struct __vxge_hw_device *hldev;
2688        struct vxge_vpath *vpath;
2689        int ret = 0;
2690        int i;
2691        u64 val64;
2692
2693        vxge_debug_entryexit(VXGE_TRACE,
2694                "%s: %s:%d", dev->name, __func__, __LINE__);
2695
2696        vdev = netdev_priv(dev);
2697        hldev = pci_get_drvdata(vdev->pdev);
2698
2699        /* make sure the link is off by default every time the NIC is
2700         * initialized */
2701        netif_carrier_off(dev);
2702
2703        /* Open VPATHs */
2704        status = vxge_open_vpaths(vdev);
2705        if (status != VXGE_HW_OK) {
2706                vxge_debug_init(VXGE_ERR,
2707                        "%s: fatal: Vpath open failed", vdev->ndev->name);
2708                ret = -EPERM;
2709                goto out0;
2710        }
2711
2712        vdev->mtu = dev->mtu;
2713
2714        status = vxge_add_isr(vdev);
2715        if (status != VXGE_HW_OK) {
2716                vxge_debug_init(VXGE_ERR,
2717                        "%s: fatal: ISR add failed", dev->name);
2718                ret = -EPERM;
2719                goto out1;
2720        }
2721
2722        if (vdev->config.intr_type != MSI_X) {
2723                netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2724                        vdev->config.napi_weight);
2725                napi_enable(&vdev->napi);
2726                for (i = 0; i < vdev->no_of_vpath; i++) {
2727                        vpath = &vdev->vpaths[i];
2728                        vpath->ring.napi_p = &vdev->napi;
2729                }
2730        } else {
2731                for (i = 0; i < vdev->no_of_vpath; i++) {
2732                        vpath = &vdev->vpaths[i];
2733                        netif_napi_add(dev, &vpath->ring.napi,
2734                            vxge_poll_msix, vdev->config.napi_weight);
2735                        napi_enable(&vpath->ring.napi);
2736                        vpath->ring.napi_p = &vpath->ring.napi;
2737                }
2738        }
2739
2740        /* configure RTH */
2741        if (vdev->config.rth_steering) {
2742                status = vxge_rth_configure(vdev);
2743                if (status != VXGE_HW_OK) {
2744                        vxge_debug_init(VXGE_ERR,
2745                                "%s: fatal: RTH configuration failed",
2746                                dev->name);
2747                        ret = -EPERM;
2748                        goto out2;
2749                }
2750        }
2751        printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2752               hldev->config.rth_en ? "enabled" : "disabled");
2753
2754        for (i = 0; i < vdev->no_of_vpath; i++) {
2755                vpath = &vdev->vpaths[i];
2756
2757                /* set initial mtu before enabling the device */
2758                status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2759                if (status != VXGE_HW_OK) {
2760                        vxge_debug_init(VXGE_ERR,
2761                                "%s: fatal: can not set new MTU", dev->name);
2762                        ret = -EPERM;
2763                        goto out2;
2764                }
2765        }
2766
2767        VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2768        vxge_debug_init(vdev->level_trace,
2769                "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2770        VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2771
2772        /* Restore the DA, VID table and also multicast and promiscuous mode
2773         * states
2774         */
2775        if (vdev->all_multi_flg) {
2776                for (i = 0; i < vdev->no_of_vpath; i++) {
2777                        vpath = &vdev->vpaths[i];
2778                        vxge_restore_vpath_mac_addr(vpath);
2779                        vxge_restore_vpath_vid_table(vpath);
2780
2781                        status = vxge_hw_vpath_mcast_enable(vpath->handle);
2782                        if (status != VXGE_HW_OK)
2783                                vxge_debug_init(VXGE_ERR,
2784                                        "%s:%d Enabling multicast failed",
2785                                        __func__, __LINE__);
2786                }
2787        }
2788
2789        /* Enable vpaths to sniff all unicast/multicast traffic that is not
2790         * addressed to them. We allow promiscuous mode for the PF only
2791         */
2792
2793        val64 = 0;
2794        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2795                val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2796
2797        vxge_hw_mgmt_reg_write(vdev->devh,
2798                vxge_hw_mgmt_reg_type_mrpcim,
2799                0,
2800                (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2801                        rxmac_authorize_all_addr),
2802                val64);
2803
2804        vxge_hw_mgmt_reg_write(vdev->devh,
2805                vxge_hw_mgmt_reg_type_mrpcim,
2806                0,
2807                (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2808                        rxmac_authorize_all_vid),
2809                val64);
2810
2811        vxge_set_multicast(dev);
2812
2813        /* Enable broadcast for all vpaths, and multicast when address learning is on */
2814        for (i = 0; i < vdev->no_of_vpath; i++) {
2815                vpath = &vdev->vpaths[i];
2816                status = vxge_hw_vpath_bcast_enable(vpath->handle);
2817                if (status != VXGE_HW_OK)
2818                        vxge_debug_init(VXGE_ERR,
2819                                "%s : Can not enable bcast for vpath "
2820                                "id %d", dev->name, i);
2821                if (vdev->config.addr_learn_en) {
2822                        status = vxge_hw_vpath_mcast_enable(vpath->handle);
2823                        if (status != VXGE_HW_OK)
2824                                vxge_debug_init(VXGE_ERR,
2825                                        "%s : Can not enable mcast for vpath "
2826                                        "id %d", dev->name, i);
2827                }
2828        }
2829
2830        vxge_hw_device_setpause_data(vdev->devh, 0,
2831                vdev->config.tx_pause_enable,
2832                vdev->config.rx_pause_enable);
2833
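            /* set up the vpath-reset poll timer (vxge_poll_vp_reset) if not already created */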
2834        if (vdev->vp_reset_timer.function == NULL)
2835                vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
2836                              HZ / 2);
2837
2838        /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2839        if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2840                vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
2841                              HZ / 2);
2842
2843        set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2844
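            /* make the CARD_UP state visible before the link check and interrupt enable below */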
2845        smp_wmb();
2846
2847        if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2848                netif_carrier_on(vdev->ndev);
2849                netdev_notice(vdev->ndev, "Link Up\n");
2850                vdev->stats.link_up++;
2851        }
2852
2853        vxge_hw_device_intr_enable(vdev->devh);
2854
2855        smp_wmb();
2856
2857        for (i = 0; i < vdev->no_of_vpath; i++) {
2858                vpath = &vdev->vpaths[i];
2859
2860                vxge_hw_vpath_enable(vpath->handle);
2861                smp_wmb();
2862                vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2863        }
2864
2865        netif_tx_start_all_queues(vdev->ndev);
2866
2867        /* configure CI */
2868        vxge_config_ci_for_tti_rti(vdev);
2869
2870        goto out0;
2871
2872out2:
2873        vxge_rem_isr(vdev);
2874
2875        /* Disable napi */
2876        if (vdev->config.intr_type != MSI_X)
2877                napi_disable(&vdev->napi);
2878        else {
2879                for (i = 0; i < vdev->no_of_vpath; i++)
2880                        napi_disable(&vdev->vpaths[i].ring.napi);
2881        }
2882
2883out1:
2884        vxge_close_vpaths(vdev, 0);
2885out0:
2886        vxge_debug_entryexit(VXGE_TRACE,
2887                                "%s: %s:%d  Exiting...",
2888                                dev->name, __func__, __LINE__);
2889        return ret;
2890}
2891
2892/* Loop through the mac address list and delete all the entries */
2893static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2894{
2895
2896        struct list_head *entry, *next;
2897        if (list_empty(&vpath->mac_addr_list))
2898                return;
2899
2900        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2901                list_del(entry);
2902                kfree(entry);
2903        }
2904}
2905
2906static void vxge_napi_del_all(struct vxgedev *vdev)
2907{
2908        int i;
2909        if (vdev->config.intr_type != MSI_X)
2910                netif_napi_del(&vdev->napi);
2911        else {
2912                for (i = 0; i < vdev->no_of_vpath; i++)
2913                        netif_napi_del(&vdev->vpaths[i].ring.napi);
2914        }
2915}
2916
2917static int do_vxge_close(struct net_device *dev, int do_io)
2918{
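            /* do_io == 0: skip device register access (used by the PCI error-recovery path) */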
2919        enum vxge_hw_status status;
2920        struct vxgedev *vdev;
2921        struct __vxge_hw_device *hldev;
2922        int i;
2923        u64 val64, vpath_vector;
2924        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2925                dev->name, __func__, __LINE__);
2926
2927        vdev = netdev_priv(dev);
2928        hldev = pci_get_drvdata(vdev->pdev);
2929
2930        if (unlikely(!is_vxge_card_up(vdev)))
2931                return 0;
2932
2933        /* If vxge_handle_crit_err task is executing,
2934         * wait till it completes. */
2935        while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2936                msleep(50);
2937
2938        if (do_io) {
2939                /* Put the vpath back in normal mode */
2940                vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2941                status = vxge_hw_mgmt_reg_read(vdev->devh,
2942                                vxge_hw_mgmt_reg_type_mrpcim,
2943                                0,
2944                                (ulong)offsetof(
2945                                        struct vxge_hw_mrpcim_reg,
2946                                        rts_mgr_cbasin_cfg),
2947                                &val64);
2948                if (status == VXGE_HW_OK) {
2949                        val64 &= ~vpath_vector;
2950                        status = vxge_hw_mgmt_reg_write(vdev->devh,
2951                                        vxge_hw_mgmt_reg_type_mrpcim,
2952                                        0,
2953                                        (ulong)offsetof(
2954                                                struct vxge_hw_mrpcim_reg,
2955                                                rts_mgr_cbasin_cfg),
2956                                        val64);
2957                }
2958
2959                /* Remove the function 0 from promiscuous mode */
2960                vxge_hw_mgmt_reg_write(vdev->devh,
2961                        vxge_hw_mgmt_reg_type_mrpcim,
2962                        0,
2963                        (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2964                                rxmac_authorize_all_addr),
2965                        0);
2966
2967                vxge_hw_mgmt_reg_write(vdev->devh,
2968                        vxge_hw_mgmt_reg_type_mrpcim,
2969                        0,
2970                        (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2971                                rxmac_authorize_all_vid),
2972                        0);
2973
2974                smp_wmb();
2975        }
2976
2977        if (vdev->titan1)
2978                del_timer_sync(&vdev->vp_lockup_timer);
2979
2980        del_timer_sync(&vdev->vp_reset_timer);
2981
2982        if (do_io)
2983                vxge_hw_device_wait_receive_idle(hldev);
2984
2985        clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2986
2987        /* Disable napi */
2988        if (vdev->config.intr_type != MSI_X)
2989                napi_disable(&vdev->napi);
2990        else {
2991                for (i = 0; i < vdev->no_of_vpath; i++)
2992                        napi_disable(&vdev->vpaths[i].ring.napi);
2993        }
2994
2995        netif_carrier_off(vdev->ndev);
2996        netdev_notice(vdev->ndev, "Link Down\n");
2997        netif_tx_stop_all_queues(vdev->ndev);
2998
2999        /* Note that at this point xmit() is stopped by upper layer */
3000        if (do_io)
3001                vxge_hw_device_intr_disable(vdev->devh);
3002
3003        vxge_rem_isr(vdev);
3004
3005        vxge_napi_del_all(vdev);
3006
3007        if (do_io)
3008                vxge_reset_all_vpaths(vdev);
3009
3010        vxge_close_vpaths(vdev, 0);
3011
3012        vxge_debug_entryexit(VXGE_TRACE,
3013                "%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);
3014
3015        clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3016
3017        return 0;
3018}
3019
3020/**
3021 * vxge_close
3022 * @dev: device pointer.
3023 *
3024 * This is the stop entry point of the driver. It needs to undo exactly
3025 * whatever was done by the open entry point, thus it's usually referred to
3026 * as the close function. Among other things this function mainly stops the
3027 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3028 * Return value: '0' on success and an appropriate negative error code as
3029 * defined in errno.h on failure.
3030 */
3031static int vxge_close(struct net_device *dev)
3032{
3033        do_vxge_close(dev, 1);
3034        return 0;
3035}
3036
3037/**
3038 * vxge_change_mtu
3039 * @dev: net device pointer.
3040 * @new_mtu: the new MTU size for the device.
3041 *
3042 * A driver entry point to change MTU size for the device. Before changing
3043 * the MTU the device must be stopped.
3044 */
3045static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3046{
3047        struct vxgedev *vdev = netdev_priv(dev);
3048
3049        vxge_debug_entryexit(vdev->level_trace,
3050                "%s:%d", __func__, __LINE__);
3051
3052        /* check if device is down already */
3053        if (unlikely(!is_vxge_card_up(vdev))) {
3054                /* just store new value, will use later on open() */
3055                dev->mtu = new_mtu;
3056                vxge_debug_init(vdev->level_err,
3057                        "%s", "device is down on MTU change");
3058                return 0;
3059        }
3060
3061        vxge_debug_init(vdev->level_trace,
3062                "trying to apply new MTU %d", new_mtu);
3063
3064        if (vxge_close(dev))
3065                return -EIO;
3066
3067        dev->mtu = new_mtu;
3068        vdev->mtu = new_mtu;
3069
3070        if (vxge_open(dev))
3071                return -EIO;
3072
3073        vxge_debug_init(vdev->level_trace,
3074                "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3075
3076        vxge_debug_entryexit(vdev->level_trace,
3077                "%s:%d  Exiting...", __func__, __LINE__);
3078
3079        return 0;
3080}
3081
3082/**
3083 * vxge_get_stats64
3084 * @dev: pointer to the device structure
3085 * @net_stats: pointer to struct rtnl_link_stats64
3086 *
3087 */
3088static void
3089vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3090{
3091        struct vxgedev *vdev = netdev_priv(dev);
3092        int k;
3093
3094        /* net_stats already zeroed by caller */
3095        for (k = 0; k < vdev->no_of_vpath; k++) {
3096                struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3097                struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3098                unsigned int start;
3099                u64 packets, bytes, multicast;
3100
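                    /* read Rx counters under the u64_stats seqcount for consistent 64-bit values on 32-bit hosts */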
3101                do {
3102                        start = u64_stats_fetch_begin_irq(&rxstats->syncp);
3103
3104                        packets   = rxstats->rx_frms;
3105                        multicast = rxstats->rx_mcast;
3106                        bytes     = rxstats->rx_bytes;
3107                } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
3108
3109                net_stats->rx_packets += packets;
3110                net_stats->rx_bytes += bytes;
3111                net_stats->multicast += multicast;
3112
3113                net_stats->rx_errors += rxstats->rx_errors;
3114                net_stats->rx_dropped += rxstats->rx_dropped;
3115
3116                do {
3117                        start = u64_stats_fetch_begin_irq(&txstats->syncp);
3118
3119                        packets = txstats->tx_frms;
3120                        bytes   = txstats->tx_bytes;
3121                } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
3122
3123                net_stats->tx_packets += packets;
3124                net_stats->tx_bytes += bytes;
3125                net_stats->tx_errors += txstats->tx_errors;
3126        }
3127}
3128
3129static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3130{
3131        enum vxge_hw_status status;
3132        u64 val64;
3133
3134        /* Timestamp is passed to the driver via the FCS, therefore we
3135         * must disable the FCS stripping by the adapter.  Since this is
3136         * required for the driver to load (due to a hardware bug),
3137         * there is no need to do anything special here.
3138         */
3139        val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3140                VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3141                VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3142
3143        status = vxge_hw_mgmt_reg_write(devh,
3144                                        vxge_hw_mgmt_reg_type_mrpcim,
3145                                        0,
3146                                        offsetof(struct vxge_hw_mrpcim_reg,
3147                                                 xmac_timestamp),
3148                                        val64);
3149        vxge_hw_device_flush_io(devh);
3150        devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3151        return status;
3152}
3153
3154static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
3155{
3156        struct hwtstamp_config config;
3157        int i;
3158
3159        if (copy_from_user(&config, data, sizeof(config)))
3160                return -EFAULT;
3161
3162        /* reserved for future extensions */
3163        if (config.flags)
3164                return -EINVAL;
3165
3166        /* Transmit HW Timestamp not supported */
3167        switch (config.tx_type) {
3168        case HWTSTAMP_TX_OFF:
3169                break;
3170        case HWTSTAMP_TX_ON:
3171        default:
3172                return -ERANGE;
3173        }
3174
3175        switch (config.rx_filter) {
3176        case HWTSTAMP_FILTER_NONE:
3177                vdev->rx_hwts = 0;
3178                config.rx_filter = HWTSTAMP_FILTER_NONE;
3179                break;
3180
3181        case HWTSTAMP_FILTER_ALL:
3182        case HWTSTAMP_FILTER_SOME:
3183        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3184        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3185        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3186        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3187        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3188        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3189        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3190        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3191        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3192        case HWTSTAMP_FILTER_PTP_V2_EVENT:
3193        case HWTSTAMP_FILTER_PTP_V2_SYNC:
3194        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3195        case HWTSTAMP_FILTER_NTP_ALL:
3196                if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3197                        return -EFAULT;
3198
3199                vdev->rx_hwts = 1;
3200                config.rx_filter = HWTSTAMP_FILTER_ALL;
3201                break;
3202
3203        default:
3204                 return -ERANGE;
3205        }
3206
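            /* propagate the new Rx timestamp setting to every vpath's ring */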
3207        for (i = 0; i < vdev->no_of_vpath; i++)
3208                vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3209
3210        if (copy_to_user(data, &config, sizeof(config)))
3211                return -EFAULT;
3212
3213        return 0;
3214}
3215
3216static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
3217{
3218        struct hwtstamp_config config;
3219
3220        config.flags = 0;
3221        config.tx_type = HWTSTAMP_TX_OFF;
3222        config.rx_filter = (vdev->rx_hwts ?
3223                            HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
3224
3225        if (copy_to_user(data, &config, sizeof(config)))
3226                return -EFAULT;
3227
3228        return 0;
3229}
3230
3231/**
3232 * vxge_ioctl
3233 * @dev: Device pointer.
3234 * @rq: An IOCTL-specific structure that can contain a pointer to
3235 *       a proprietary structure used to pass information to the driver.
3236 * @cmd: This is used to distinguish between the different commands that
3237 *       can be passed to the IOCTL functions.
3238 *
3239 * Entry point for the Ioctl.
3240 */
3241static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3242{
3243        struct vxgedev *vdev = netdev_priv(dev);
3244
3245        switch (cmd) {
3246        case SIOCSHWTSTAMP:
3247                return vxge_hwtstamp_set(vdev, rq->ifr_data);
3248        case SIOCGHWTSTAMP:
3249                return vxge_hwtstamp_get(vdev, rq->ifr_data);
3250        default:
3251                return -EOPNOTSUPP;
3252        }
3253}
3254
3255/**
3256 * vxge_tx_watchdog
3257 * @dev: pointer to net device structure
3258 * @txqueue: index of the hanging queue
3259 *
3260 * Watchdog for transmit side.
3261 * This function is triggered if the Tx Queue is stopped
3262 * for a pre-defined amount of time when the Interface is still up.
3263 */
3264static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue)
3265{
3266        struct vxgedev *vdev;
3267
3268        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3269
3270        vdev = netdev_priv(dev);
3271
3272        vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3273
3274        schedule_work(&vdev->reset_task);
3275        vxge_debug_entryexit(VXGE_TRACE,
3276                "%s:%d  Exiting...", __func__, __LINE__);
3277}
3278
3279/**
3280 * vxge_vlan_rx_add_vid
3281 * @dev: net device pointer.
3282 * @proto: vlan protocol
3283 * @vid: vid
3284 *
3285 * Add the vlan id to the devices vlan id table
3286 */
3287static int
3288vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3289{
3290        struct vxgedev *vdev = netdev_priv(dev);
3291        struct vxge_vpath *vpath;
3292        int vp_id;
3293
3294        /* Add these vlan to the vid table */
3295        for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3296                vpath = &vdev->vpaths[vp_id];
3297                if (!vpath->is_open)
3298                        continue;
3299                vxge_hw_vpath_vid_add(vpath->handle, vid);
3300        }
3301        set_bit(vid, vdev->active_vlans);
3302        return 0;
3303}
3304
3305/**
3306 * vxge_vlan_rx_kill_vid
3307 * @dev: net device pointer.
3308 * @proto: vlan protocol
3309 * @vid: vid
3310 *
3311 * Remove the vlan id from the device's vlan id table
3312 */
3313static int
3314vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3315{
3316        struct vxgedev *vdev = netdev_priv(dev);
3317        struct vxge_vpath *vpath;
3318        int vp_id;
3319
3320        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3321
3322        /* Delete this vlan from the vid table */
3323        for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3324                vpath = &vdev->vpaths[vp_id];
3325                if (!vpath->is_open)
3326                        continue;
3327                vxge_hw_vpath_vid_delete(vpath->handle, vid);
3328        }
3329        vxge_debug_entryexit(VXGE_TRACE,
3330                "%s:%d  Exiting...", __func__, __LINE__);
3331        clear_bit(vid, vdev->active_vlans);
3332        return 0;
3333}
3334
3335static const struct net_device_ops vxge_netdev_ops = {
3336        .ndo_open               = vxge_open,
3337        .ndo_stop               = vxge_close,
3338        .ndo_get_stats64        = vxge_get_stats64,
3339        .ndo_start_xmit         = vxge_xmit,
3340        .ndo_validate_addr      = eth_validate_addr,
3341        .ndo_set_rx_mode        = vxge_set_multicast,
3342        .ndo_eth_ioctl          = vxge_ioctl,
3343        .ndo_set_mac_address    = vxge_set_mac_addr,
3344        .ndo_change_mtu         = vxge_change_mtu,
3345        .ndo_fix_features       = vxge_fix_features,
3346        .ndo_set_features       = vxge_set_features,
3347        .ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
3348        .ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
3349        .ndo_tx_timeout         = vxge_tx_watchdog,
3350#ifdef CONFIG_NET_POLL_CONTROLLER
3351        .ndo_poll_controller    = vxge_netpoll,
3352#endif
3353};
3354
3355static int vxge_device_register(struct __vxge_hw_device *hldev,
3356                                struct vxge_config *config, int high_dma,
3357                                int no_of_vpath, struct vxgedev **vdev_out)
3358{
3359        struct net_device *ndev;
3360        enum vxge_hw_status status = VXGE_HW_OK;
3361        struct vxgedev *vdev;
3362        int ret = 0, no_of_queue = 1;
3363        u64 stat;
3364
3365        *vdev_out = NULL;
3366        if (config->tx_steering_type)
3367                no_of_queue = no_of_vpath;
3368
3369        ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3370                        no_of_queue);
3371        if (ndev == NULL) {
3372                vxge_debug_init(
3373                        vxge_hw_device_trace_level_get(hldev),
3374                        "%s : device allocation failed", __func__);
3375                ret = -ENODEV;
3376                goto _out0;
3377        }
3378
3379        vxge_debug_entryexit(
3380                vxge_hw_device_trace_level_get(hldev),
3381                "%s: %s:%d  Entering...",
3382                ndev->name, __func__, __LINE__);
3383
3384        vdev = netdev_priv(ndev);
3385        memset(vdev, 0, sizeof(struct vxgedev));
3386
3387        vdev->ndev = ndev;
3388        vdev->devh = hldev;
3389        vdev->pdev = hldev->pdev;
3390        memcpy(&vdev->config, config, sizeof(struct vxge_config));
3391        vdev->rx_hwts = 0;
3392        vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3393
3394        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3395
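            /* hw_features are ethtool-toggleable; Rx VLAN stripping/filtering are always enabled below */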
3396        ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3397                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3398                NETIF_F_TSO | NETIF_F_TSO6 |
3399                NETIF_F_HW_VLAN_CTAG_TX;
3400        if (vdev->config.rth_steering != NO_STEERING)
3401                ndev->hw_features |= NETIF_F_RXHASH;
3402
3403        ndev->features |= ndev->hw_features |
3404                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3405
3406
3407        ndev->netdev_ops = &vxge_netdev_ops;
3408
3409        ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3410        INIT_WORK(&vdev->reset_task, vxge_reset);
3411
3412        vxge_initialize_ethtool_ops(ndev);
3413
3414        /* Allocate memory for vpath */
3415        vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
3416                               GFP_KERNEL);
3417        if (!vdev->vpaths) {
3418                vxge_debug_init(VXGE_ERR,
3419                        "%s: vpath memory allocation failed",
3420                        vdev->ndev->name);
3421                ret = -ENOMEM;
3422                goto _out1;
3423        }
3424
3425        vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3426                "%s : checksumming enabled", __func__);
3427
3428        if (high_dma) {
3429                ndev->features |= NETIF_F_HIGHDMA;
3430                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3431                        "%s : using High DMA", __func__);
3432        }
3433
3434        /* MTU range: 68 - 9600 */
3435        ndev->min_mtu = VXGE_HW_MIN_MTU;
3436        ndev->max_mtu = VXGE_HW_MAX_MTU;
3437
3438        ret = register_netdev(ndev);
3439        if (ret) {
3440                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3441                        "%s: %s : device registration failed!",
3442                        ndev->name, __func__);
3443                goto _out2;
3444        }
3445
3446        /*  Set the factory defined MAC address initially */
3447        ndev->addr_len = ETH_ALEN;
3448
3449        /* Make the link state off at this point; when the link change
3450         * interrupt comes, the state will automatically be changed to
3451         * the right state.
3452         */
3453        netif_carrier_off(ndev);
3454
3455        vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3456                "%s: Ethernet device registered",
3457                ndev->name);
3458
3459        hldev->ndev = ndev;
3460        *vdev_out = vdev;
3461
3462        /* Resetting the Device stats */
3463        status = vxge_hw_mrpcim_stats_access(
3464                                hldev,
3465                                VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3466                                0,
3467                                0,
3468                                &stat);
3469
3470        if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
3471                vxge_debug_init(
3472                        vxge_hw_device_trace_level_get(hldev),
3473                        "%s: device stats clear returns "
3474                        "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
3475
3476        vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3477                "%s: %s:%d  Exiting...",
3478                ndev->name, __func__, __LINE__);
3479
3480        return ret;
3481_out2:
3482        kfree(vdev->vpaths);
3483_out1:
3484        free_netdev(ndev);
3485_out0:
3486        return ret;
3487}
3488
3489/*
3490 * vxge_device_unregister
3491 *
3492 * This function will unregister and free network device
3493 */
3494static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3495{
3496        struct vxgedev *vdev;
3497        struct net_device *dev;
3498        char buf[IFNAMSIZ];
3499
3500        dev = hldev->ndev;
3501        vdev = netdev_priv(dev);
3502
3503        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3504                             __func__, __LINE__);
3505
3506        strlcpy(buf, dev->name, IFNAMSIZ);
3507
3508        flush_work(&vdev->reset_task);
3509
3510        /* unregister_netdev() will call the stop (close) entry point if the device is up */
3511        unregister_netdev(dev);
3512
3513        kfree(vdev->vpaths);
3514
3515        vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3516                        buf);
3517        vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
3518                             __func__, __LINE__);
3519
3520        /* we are safe to free it now */
3521        free_netdev(dev);
3522}
3523
3524/*
3525 * vxge_callback_crit_err
3526 *
3527 * This function is called by the alarm handler in interrupt context.
3528 * Driver must analyze it based on the event type.
3529 */
3530static void
3531vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3532                        enum vxge_hw_event type, u64 vp_id)
3533{
3534        struct net_device *dev = hldev->ndev;
3535        struct vxgedev *vdev = netdev_priv(dev);
3536        struct vxge_vpath *vpath = NULL;
3537        int vpath_idx;
3538
3539        vxge_debug_entryexit(vdev->level_trace,
3540                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3541
3542        /* Note: This event type should be used for device wide
3543         * indications only - Serious errors, Slot freeze and critical errors
3544         */
3545        vdev->cric_err_event = type;
3546
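            /* map the vpath id reported with the event to the driver's vpath entry */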
3547        for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3548                vpath = &vdev->vpaths[vpath_idx];
3549                if (vpath->device_id == vp_id)
3550                        break;
3551        }
3552
3553        if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3554                if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3555                        vxge_debug_init(VXGE_ERR,
3556                                "%s: Slot is frozen", vdev->ndev->name);
3557                } else if (type == VXGE_HW_EVENT_SERR) {
3558                        vxge_debug_init(VXGE_ERR,
3559                                "%s: Encountered Serious Error",
3560                                vdev->ndev->name);
3561                } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3562                        vxge_debug_init(VXGE_ERR,
3563                                "%s: Encountered Critical Error",
3564                                vdev->ndev->name);
3565        }
3566
3567        if ((type == VXGE_HW_EVENT_SERR) ||
3568                (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3569                if (unlikely(vdev->exec_mode))
3570                        clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3571        } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3572                vxge_hw_device_mask_all(hldev);
3573                if (unlikely(vdev->exec_mode))
3574                        clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3575        } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3576                  (type == VXGE_HW_EVENT_VPATH_ERR)) {
3577
3578                if (unlikely(vdev->exec_mode))
3579                        clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3580                else {
3581                        /* check if this vpath is already set for reset */
3582                        if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3583
3584                                /* disable interrupts for this vpath */
3585                                vxge_vpath_intr_disable(vdev, vpath_idx);
3586
3587                                /* stop the queue for this vpath */
3588                                netif_tx_stop_queue(vpath->fifo.txq);
3589                        }
3590                }
3591        }
3592
3593        vxge_debug_entryexit(vdev->level_trace,
3594                "%s: %s:%d  Exiting...",
3595                vdev->ndev->name, __func__, __LINE__);
3596}
3597
3598static void verify_bandwidth(void)
3599{
3600        int i, band_width, total = 0, equal_priority = 0;
3601
3602        /* 1. If the user enters 0 for some vpath, give equal priority to all */
3603        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3604                if (bw_percentage[i] == 0) {
3605                        equal_priority = 1;
3606                        break;
3607                }
3608        }
3609
3610        if (!equal_priority) {
3611                /* 2. If sum exceeds 100, give equal priority to all */
3612                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3613                        if (bw_percentage[i] == 0xFF)
3614                                break;
3615
3616                        total += bw_percentage[i];
3617                        if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3618                                equal_priority = 1;
3619                                break;
3620                        }
3621                }
3622        }
3623
3624        if (!equal_priority) {
3625                /* Is all the bandwidth consumed? */
3626                if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3627                        if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3628                                /* Split the rest of the bw equally among the remaining VPs */
3629                                band_width =
3630                                  (VXGE_HW_VPATH_BANDWIDTH_MAX  - total) /
3631                                        (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3632                                if (band_width < 2) /* min of 2% */
3633                                        equal_priority = 1;
3634                                else {
3635                                        for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3636                                                i++)
3637                                                bw_percentage[i] =
3638                                                        band_width;
3639                                }
3640                        }
3641                } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3642                        equal_priority = 1;
3643        }
3644
3645        if (equal_priority) {
3646                vxge_debug_init(VXGE_ERR,
3647                        "%s: Assigning equal bandwidth to all the vpaths",
3648                        VXGE_DRIVER_NAME);
3649                bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3650                                        VXGE_HW_MAX_VIRTUAL_PATHS;
3651                for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3652                        bw_percentage[i] = bw_percentage[0];
3653        }
3654}
3655
3656/*
3657 * Vpath configuration
3658 */
3659static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
3660                              u64 vpath_mask, struct vxge_config *config_param)
3661{
3662        int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3663        u32 txdl_size, txdl_per_memblock;
3664
3665        temp = driver_config->vpath_per_dev;
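            /* default vpath count: derive from the default RSS queue count, budgeting two CPUs per vpath */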
3666        if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3667                (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3668                /* No more CPUs. Return vpath count as zero. */
3669                if (driver_config->g_no_cpus == -1)
3670                        return 0;
3671
3672                if (!driver_config->g_no_cpus)
3673                        driver_config->g_no_cpus =
3674                                netif_get_num_default_rss_queues();
3675
3676                driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3677                if (!driver_config->vpath_per_dev)
3678                        driver_config->vpath_per_dev = 1;
3679
3680                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3681                        if (vxge_bVALn(vpath_mask, i, 1))
3682                                default_no_vpath++;
3683
3684                if (default_no_vpath < driver_config->vpath_per_dev)
3685                        driver_config->vpath_per_dev = default_no_vpath;
3686
3687                driver_config->g_no_cpus = driver_config->g_no_cpus -
3688                                (driver_config->vpath_per_dev * 2);
3689                if (driver_config->g_no_cpus <= 0)
3690                        driver_config->g_no_cpus = -1;
3691        }
3692
3693        if (driver_config->vpath_per_dev == 1) {
3694                vxge_debug_ll_config(VXGE_TRACE,
3695                        "%s: Disable tx and rx steering, "
3696                        "as single vpath is configured", VXGE_DRIVER_NAME);
3697                config_param->rth_steering = NO_STEERING;
3698                config_param->tx_steering_type = NO_STEERING;
3699                device_config->rth_en = 0;
3700        }
3701
3702        /* configure bandwidth */
3703        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3704                device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3705
3706        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3707                device_config->vp_config[i].vp_id = i;
3708                device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3709                if (no_of_vpaths < driver_config->vpath_per_dev) {
3710                        if (!vxge_bVALn(vpath_mask, i, 1)) {
3711                                vxge_debug_ll_config(VXGE_TRACE,
3712                                        "%s: vpath: %d is not available",
3713                                        VXGE_DRIVER_NAME, i);
3714                                continue;
3715                        } else {
3716                                vxge_debug_ll_config(VXGE_TRACE,
3717                                        "%s: vpath: %d available",
3718                                        VXGE_DRIVER_NAME, i);
3719                                no_of_vpaths++;
3720                        }
3721                } else {
3722                        vxge_debug_ll_config(VXGE_TRACE,
3723                                "%s: vpath: %d is not configured, "
3724                                "max_config_vpath exceeded",
3725                                VXGE_DRIVER_NAME, i);
3726                        break;
3727                }
3728
3729                /* Configure Tx fifo's */
3730                device_config->vp_config[i].fifo.enable =
3731                                                VXGE_HW_FIFO_ENABLE;
3732                device_config->vp_config[i].fifo.max_frags =
3733                                MAX_SKB_FRAGS + 1;
3734                device_config->vp_config[i].fifo.memblock_size =
3735                        VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3736
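                    /* a TxDL holds max_frags descriptors; compute TxDLs per memory block and fifo blocks needed */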
3737                txdl_size = device_config->vp_config[i].fifo.max_frags *
3738                                sizeof(struct vxge_hw_fifo_txd);
3739                txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3740
3741                device_config->vp_config[i].fifo.fifo_blocks =
3742                        ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3743
3744                device_config->vp_config[i].fifo.intr =
3745                                VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3746
3747                /* Configure tti properties */
3748                device_config->vp_config[i].tti.intr_enable =
3749                                        VXGE_HW_TIM_INTR_ENABLE;
3750
3751                device_config->vp_config[i].tti.btimer_val =
3752                        (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3753
3754                device_config->vp_config[i].tti.timer_ac_en =
3755                                VXGE_HW_TIM_TIMER_AC_ENABLE;
3756
3757                /* For msi-x with napi (each vector has a handler of its own) -
3758                 * Set CI to OFF for all vpaths
3759                 */
3760                device_config->vp_config[i].tti.timer_ci_en =
3761                        VXGE_HW_TIM_TIMER_CI_DISABLE;
3762
3763                device_config->vp_config[i].tti.timer_ri_en =
3764                                VXGE_HW_TIM_TIMER_RI_DISABLE;
3765
3766                device_config->vp_config[i].tti.util_sel =
3767                        VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3768
3769                device_config->vp_config[i].tti.ltimer_val =
3770                        (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3771
3772                device_config->vp_config[i].tti.rtimer_val =
3773                        (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3774
3775                device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3776                device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3777                device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3778                device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3779                device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3780                device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3781                device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3782
3783                /* Configure Rx rings */
3784                device_config->vp_config[i].ring.enable  =
3785                                                VXGE_HW_RING_ENABLE;
3786
3787                device_config->vp_config[i].ring.ring_blocks  =
3788                                                VXGE_HW_DEF_RING_BLOCKS;
3789
3790                device_config->vp_config[i].ring.buffer_mode =
3791                        VXGE_HW_RING_RXD_BUFFER_MODE_1;
3792
3793                device_config->vp_config[i].ring.rxds_limit  =
3794                                VXGE_HW_DEF_RING_RXDS_LIMIT;
3795
3796                device_config->vp_config[i].ring.scatter_mode =
3797                                        VXGE_HW_RING_SCATTER_MODE_A;
3798
3799                /* Configure rti properties */
3800                device_config->vp_config[i].rti.intr_enable =
3801                                        VXGE_HW_TIM_INTR_ENABLE;
3802
3803                device_config->vp_config[i].rti.btimer_val =
3804                        (VXGE_RTI_BTIMER_VAL * 1000)/272;
3805
3806                device_config->vp_config[i].rti.timer_ac_en =
3807                                                VXGE_HW_TIM_TIMER_AC_ENABLE;
3808
3809                device_config->vp_config[i].rti.timer_ci_en =
3810                                                VXGE_HW_TIM_TIMER_CI_DISABLE;
3811
3812                device_config->vp_config[i].rti.timer_ri_en =
3813                                                VXGE_HW_TIM_TIMER_RI_DISABLE;
3814
3815                device_config->vp_config[i].rti.util_sel =
3816                                VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3817
3818                device_config->vp_config[i].rti.urange_a =
3819                                                RTI_RX_URANGE_A;
3820                device_config->vp_config[i].rti.urange_b =
3821                                                RTI_RX_URANGE_B;
3822                device_config->vp_config[i].rti.urange_c =
3823                                                RTI_RX_URANGE_C;
3824                device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3825                device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3826                device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3827                device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3828
3829                device_config->vp_config[i].rti.rtimer_val =
3830                        (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3831
3832                device_config->vp_config[i].rti.ltimer_val =
3833                        (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3834
3835                device_config->vp_config[i].rpa_strip_vlan_tag =
3836                        vlan_tag_strip;
3837        }
3838
3839        driver_config->vpath_per_dev = temp;
3840        return no_of_vpaths;
3841}
3842
3843/* initialize device configurations */
3844static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
3845                                    int *intr_type)
3846{
3847        /* Used for CQRQ/SRQ. */
3848        device_config->dma_blockpool_initial =
3849                        VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3850
3851        device_config->dma_blockpool_max =
3852                        VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3853
3854        if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3855                max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3856
3857        if (!IS_ENABLED(CONFIG_PCI_MSI)) {
3858                vxge_debug_init(VXGE_ERR,
3859                        "%s: This Kernel does not support "
3860                        "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3861                *intr_type = INTA;
3862        }
3863
3864        /* Configure whether to use MSI-X or INTA (IRQ line). */
3865        switch (*intr_type) {
3866        case INTA:
3867                device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3868                break;
3869
3870        case MSI_X:
3871                device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3872                break;
3873        }
3874
3875        /* Timer period between device poll */
3876        device_config->device_poll_millis = VXGE_TIMER_DELAY;
3877
3878        /* Configure mac based steering. */
3879        device_config->rts_mac_en = addr_learn_en;
3880
3881        /* Configure Vpaths */
3882        device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3883
3884        vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3885                        __func__);
3886        vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3887                        device_config->intr_mode);
3888        vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3889                        device_config->device_poll_millis);
3890        vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3891                        device_config->rth_en);
3892        vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3893                        device_config->rth_it_type);
3894}
3895
3896static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3897{
3898        int i;
3899
3900        vxge_debug_init(VXGE_TRACE,
3901                "%s: %d Vpath(s) opened",
3902                vdev->ndev->name, vdev->no_of_vpath);
3903
3904        switch (vdev->config.intr_type) {
3905        case INTA:
3906                vxge_debug_init(VXGE_TRACE,
3907                        "%s: Interrupt type INTA", vdev->ndev->name);
3908                break;
3909
3910        case MSI_X:
3911                vxge_debug_init(VXGE_TRACE,
3912                        "%s: Interrupt type MSI-X", vdev->ndev->name);
3913                break;
3914        }
3915
3916        if (vdev->config.rth_steering) {
3917                vxge_debug_init(VXGE_TRACE,
3918                        "%s: RTH steering enabled for TCP_IPV4",
3919                        vdev->ndev->name);
3920        } else {
3921                vxge_debug_init(VXGE_TRACE,
3922                        "%s: RTH steering disabled", vdev->ndev->name);
3923        }
3924
3925        switch (vdev->config.tx_steering_type) {
3926        case NO_STEERING:
3927                vxge_debug_init(VXGE_TRACE,
3928                        "%s: Tx steering disabled", vdev->ndev->name);
3929                break;
3930        case TX_PRIORITY_STEERING:
3931                vxge_debug_init(VXGE_TRACE,
3932                        "%s: Unsupported tx steering option",
3933                        vdev->ndev->name);
3934                vxge_debug_init(VXGE_TRACE,
3935                        "%s: Tx steering disabled", vdev->ndev->name);
3936                vdev->config.tx_steering_type = 0;
3937                break;
3938        case TX_VLAN_STEERING:
3939                vxge_debug_init(VXGE_TRACE,
3940                        "%s: Unsupported tx steering option",
3941                        vdev->ndev->name);
3942                vxge_debug_init(VXGE_TRACE,
3943                        "%s: Tx steering disabled", vdev->ndev->name);
3944                vdev->config.tx_steering_type = 0;
3945                break;
3946        case TX_MULTIQ_STEERING:
3947                vxge_debug_init(VXGE_TRACE,
3948                        "%s: Tx multiqueue steering enabled",
3949                        vdev->ndev->name);
3950                break;
3951        case TX_PORT_STEERING:
3952                vxge_debug_init(VXGE_TRACE,
3953                        "%s: Tx port steering enabled",
3954                        vdev->ndev->name);
3955                break;
3956        default:
3957                vxge_debug_init(VXGE_ERR,
3958                        "%s: Unsupported tx steering type",
3959                        vdev->ndev->name);
3960                vxge_debug_init(VXGE_TRACE,
3961                        "%s: Tx steering disabled", vdev->ndev->name);
3962                vdev->config.tx_steering_type = 0;
3963        }
3964
3965        if (vdev->config.addr_learn_en)
3966                vxge_debug_init(VXGE_TRACE,
3967                        "%s: MAC Address learning enabled", vdev->ndev->name);
3968
3969        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3970                if (!vxge_bVALn(vpath_mask, i, 1))
3971                        continue;
3972                vxge_debug_ll_config(VXGE_TRACE,
3973                        "%s: MTU size - %d", vdev->ndev->name,
3974                        vdev->devh->config.vp_config[i].mtu);
3976                vxge_debug_init(VXGE_TRACE,
3977                        "%s: VLAN tag stripping %s", vdev->ndev->name,
3978                        vdev->devh->config.vp_config[i].rpa_strip_vlan_tag
3979                        ? "Enabled" : "Disabled");
3981                vxge_debug_ll_config(VXGE_TRACE,
3982                        "%s: Max frags : %d", vdev->ndev->name,
3983                        vdev->devh->config.vp_config[i].fifo.max_frags);
3985                break;
3986        }
3987}
3988
3989/**
3990 * vxge_pm_suspend - vxge power management suspend entry point
3991 * @dev_d: device pointer
3992 *
3993 */
3994static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
3995{
3996        return -ENOSYS;
3997}
3998/**
3999 * vxge_pm_resume - vxge power management resume entry point
4000 * @dev_d: device pointer
4001 *
4002 */
4003static int __maybe_unused vxge_pm_resume(struct device *dev_d)
4004{
4005        return -ENOSYS;
4006}
4007
4008/**
4009 * vxge_io_error_detected - called when PCI error is detected
4010 * @pdev: Pointer to PCI device
4011 * @state: The current pci connection state
4012 *
4013 * This function is called after a PCI bus error affecting
4014 * this device has been detected.
4015 */
4016static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4017                                                pci_channel_state_t state)
4018{
4019        struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4020        struct net_device *netdev = hldev->ndev;
4021
4022        netif_device_detach(netdev);
4023
4024        if (state == pci_channel_io_perm_failure)
4025                return PCI_ERS_RESULT_DISCONNECT;
4026
4027        if (netif_running(netdev)) {
4028                /* Bring down the card, while avoiding PCI I/O */
4029                do_vxge_close(netdev, 0);
4030        }
4031
4032        pci_disable_device(pdev);
4033
4034        return PCI_ERS_RESULT_NEED_RESET;
4035}
4036
4037/**
4038 * vxge_io_slot_reset - called after the pci bus has been reset.
4039 * @pdev: Pointer to PCI device
4040 *
4041 * Restart the card from scratch, as if from a cold-boot.
4042 * At this point, the card has experienced a hard reset,
4043 * followed by fixups by BIOS, and has its config space
4044 * set up identically to what it was at cold boot.
4045 */
4046static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4047{
4048        struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4049        struct net_device *netdev = hldev->ndev;
4050
4051        struct vxgedev *vdev = netdev_priv(netdev);
4052
4053        if (pci_enable_device(pdev)) {
4054                netdev_err(netdev, "Cannot re-enable device after reset\n");
4055                return PCI_ERS_RESULT_DISCONNECT;
4056        }
4057
4058        pci_set_master(pdev);
4059        do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4060
4061        return PCI_ERS_RESULT_RECOVERED;
4062}
4063
4064/**
4065 * vxge_io_resume - called when traffic can start flowing again.
4066 * @pdev: Pointer to PCI device
4067 *
4068 * This callback is called when the error recovery driver tells
4069 * us that it's OK to resume normal operation.
4070 */
4071static void vxge_io_resume(struct pci_dev *pdev)
4072{
4073        struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4074        struct net_device *netdev = hldev->ndev;
4075
4076        if (netif_running(netdev)) {
4077                if (vxge_open(netdev)) {
4078                        netdev_err(netdev,
4079                                   "Can't bring device back up after reset\n");
4080                        return;
4081                }
4082        }
4083
4084        netif_device_attach(netdev);
4085}
4086
4087static inline u32 vxge_get_num_vfs(u64 function_mode)
4088{
4089        u32 num_functions = 0;
4090
4091        switch (function_mode) {
4092        case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4093        case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4094                num_functions = 8;
4095                break;
4096        case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4097                num_functions = 1;
4098                break;
4099        case VXGE_HW_FUNCTION_MODE_SRIOV:
4100        case VXGE_HW_FUNCTION_MODE_MRIOV:
4101        case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4102                num_functions = 17;
4103                break;
4104        case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4105                num_functions = 4;
4106                break;
4107        case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4108                num_functions = 2;
4109                break;
4110        case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4111                num_functions = 8; /* TODO */
4112                break;
4113        }
4114        return num_functions;
4115}
4116
4117int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4118{
4119        struct __vxge_hw_device *hldev = vdev->devh;
4120        u32 maj, min, bld, cmaj, cmin, cbld;
4121        enum vxge_hw_status status;
4122        const struct firmware *fw;
4123        int ret;
4124
4125        ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4126        if (ret) {
4127                vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4128                                VXGE_DRIVER_NAME, fw_name);
4129                goto out;
4130        }
4131
4132        /* Load the new firmware onto the adapter */
4133        status = vxge_update_fw_image(hldev, fw->data, fw->size);
4134        if (status != VXGE_HW_OK) {
4135                vxge_debug_init(VXGE_ERR,
4136                                "%s: FW image download to adapter failed '%s'.",
4137                                VXGE_DRIVER_NAME, fw_name);
4138                ret = -EIO;
4139                goto out;
4140        }
4141
4142        /* Read the version of the new firmware */
4143        status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4144        if (status != VXGE_HW_OK) {
4145                vxge_debug_init(VXGE_ERR,
4146                                "%s: Upgrade read version failed '%s'.",
4147                                VXGE_DRIVER_NAME, fw_name);
4148                ret = -EIO;
4149                goto out;
4150        }
4151
4152        cmaj = vdev->config.device_hw_info.fw_version.major;
4153        cmin = vdev->config.device_hw_info.fw_version.minor;
4154        cbld = vdev->config.device_hw_info.fw_version.build;
4155        /* It's possible the version in /lib/firmware is not the latest version.
4156         * If so, we could get into a loop of trying to upgrade to the latest
4157         * and flashing the older version.
4158         */
4159        if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4160            !override) {
4161                ret = -EINVAL;
4162                goto out;
4163        }
4164
4165        printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4166               maj, min, bld);
4167
4168        /* Flash the adapter with the new firmware */
4169        status = vxge_hw_flash_fw(hldev);
4170        if (status != VXGE_HW_OK) {
4171                vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4172                                VXGE_DRIVER_NAME, fw_name);
4173                ret = -EIO;
4174                goto out;
4175        }
4176
4177        printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
4178               "hard reset before using, thus requiring a system reboot or a "
4179               "hotplug event.\n");
4180
4181out:
4182        release_firmware(fw);
4183        return ret;
4184}
4185
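/*
 * Decide at probe time whether the adapter firmware should be upgraded to
 * the certified version and, if so, whether the gPXE or the plain image
 * should be flashed.
 */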
4186static int vxge_probe_fw_update(struct vxgedev *vdev)
4187{
4188        u32 maj, min, bld;
4189        int ret, gpxe = 0;
4190        char *fw_name;
4191
4192        maj = vdev->config.device_hw_info.fw_version.major;
4193        min = vdev->config.device_hw_info.fw_version.minor;
4194        bld = vdev->config.device_hw_info.fw_version.build;
4195
4196        if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4197                return 0;
4198
4199        /* Ignore the build number when determining if the current firmware is
4200         * "too new" to load the driver
4201         */
4202        if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4203                vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4204                                "version, unable to load driver\n",
4205                                VXGE_DRIVER_NAME);
4206                return -EINVAL;
4207        }
4208
4209        /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4210         * work with this driver.
4211         */
4212        if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4213                vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4214                                "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4215                return -EINVAL;
4216        }
4217
4218        /* Determine whether a gPXE image is present to pick the firmware file */
4219        if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4220                int i;
4221                for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4222                        if (vdev->devh->eprom_versions[i]) {
4223                                gpxe = 1;
4224                                break;
4225                        }
4226        }
4227        if (gpxe)
4228                fw_name = "vxge/X3fw-pxe.ncf";
4229        else
4230                fw_name = "vxge/X3fw.ncf";
4231
4232        ret = vxge_fw_upgrade(vdev, fw_name, 0);
4233        /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4234         * probe, so ignore them
4235         */
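        /* A successful flash (ret == 0) also takes this branch: the adapter
         * cannot be used until it has been hard reset, so fail the probe.
         */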
4236        if (ret != -EINVAL && ret != -ENOENT)
4237                return -EIO;
4238        else
4239                ret = 0;
4240
4241        if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4242            VXGE_FW_VER(maj, min, 0)) {
4243                vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4244                                " be used with this driver.",
4245                                VXGE_DRIVER_NAME, maj, min, bld);
4246                return -EINVAL;
4247        }
4248
4249        return ret;
4250}
4251
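/*
 * Return 1 if SR-IOV VFs are already enabled on this device (VF Enable set
 * in the SR-IOV capability), so that pci_enable_sriov() is not called twice.
 */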
4252static int is_sriov_initialized(struct pci_dev *pdev)
4253{
4254        int pos;
4255        u16 ctrl;
4256
4257        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4258        if (pos) {
4259                pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4260                if (ctrl & PCI_SRIOV_CTRL_VFE)
4261                        return 1;
4262        }
4263        return 0;
4264}
4265
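/* Callbacks invoked by the HW layer on link state changes and critical errors */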
4266static const struct vxge_hw_uld_cbs vxge_callbacks = {
4267        .link_up = vxge_callback_link_up,
4268        .link_down = vxge_callback_link_down,
4269        .crit_err = vxge_callback_crit_err,
4270};
4271
4272/**
4273 * vxge_probe
4274 * @pdev : structure containing the PCI related information of the device.
4275 * @pre: Pointer to the entry in vxge_id_table that matched this device.
4276 * Description:
4277 * This function is called when a new PCI device gets detected and initializes
4278 * it.
4279 * Return value:
4280 * returns 0 on success and negative on failure.
4281 *
4282 */
4283static int
4284vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4285{
4286        struct __vxge_hw_device *hldev;
4287        enum vxge_hw_status status;
4288        int ret;
4289        int high_dma = 0;
4290        u64 vpath_mask = 0;
4291        struct vxgedev *vdev;
4292        struct vxge_config *ll_config = NULL;
4293        struct vxge_hw_device_config *device_config = NULL;
4294        struct vxge_hw_device_attr attr;
4295        int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4296        u8 *macaddr;
4297        struct vxge_mac_addrs *entry;
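        /* Persist across probe calls so the first function of a new physical
         * adapter can be detected (see new_device below).
         */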
4298        static int bus = -1, device = -1;
4299        u32 host_type;
4300        u8 new_device = 0;
4301        enum vxge_hw_status is_privileged;
4302        u32 function_mode;
4303        u32 num_vfs = 0;
4304
4305        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4306        attr.pdev = pdev;
4307
4308        /* In SRIOV-17 mode, functions of the same adapter
4309         * can be deployed on different buses
4310         */
4311        if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4312            !pdev->is_virtfn)
4313                new_device = 1;
4314
4315        bus = pdev->bus->number;
4316        device = PCI_SLOT(pdev->devfn);
4317
4318        if (new_device) {
4319                if (driver_config->config_dev_cnt &&
4320                   (driver_config->config_dev_cnt !=
4321                        driver_config->total_dev_cnt))
4322                        vxge_debug_init(VXGE_ERR,
4323                                "%s: Configured %d of %d devices",
4324                                VXGE_DRIVER_NAME,
4325                                driver_config->config_dev_cnt,
4326                                driver_config->total_dev_cnt);
4327                driver_config->config_dev_cnt = 0;
4328                driver_config->total_dev_cnt = 0;
4329        }
4330
4331        /* Make the CPU-based vpath count calculation apply to
4332         * individual functions as well.
4333         */
4334        driver_config->g_no_cpus = 0;
4335        driver_config->vpath_per_dev = max_config_vpath;
4336
4337        driver_config->total_dev_cnt++;
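        /* Devices beyond the max_config_dev limit are skipped; probe still
         * returns success for them.
         */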
4338        if (++driver_config->config_dev_cnt > max_config_dev) {
4339                ret = 0;
4340                goto _exit0;
4341        }
4342
4343        device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4344                GFP_KERNEL);
4345        if (!device_config) {
4346                ret = -ENOMEM;
4347                vxge_debug_init(VXGE_ERR,
4348                        "device_config : malloc failed %s %d",
4349                        __FILE__, __LINE__);
4350                goto _exit0;
4351        }
4352
4353        ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4354        if (!ll_config) {
4355                ret = -ENOMEM;
4356                vxge_debug_init(VXGE_ERR,
4357                        "ll_config : malloc failed %s %d",
4358                        __FILE__, __LINE__);
4359                goto _exit0;
4360        }
4361        ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4362        ll_config->intr_type = MSI_X;
4363        ll_config->napi_weight = NEW_NAPI_WEIGHT;
4364        ll_config->rth_steering = RTH_STEERING;
4365
4366        /* get the default configuration parameters */
4367        vxge_hw_device_config_default_get(device_config);
4368
4369        /* initialize configuration parameters */
4370        vxge_device_config_init(device_config, &ll_config->intr_type);
4371
4372        ret = pci_enable_device(pdev);
4373        if (ret) {
4374                vxge_debug_init(VXGE_ERR,
4375                        "%s : can not enable PCI device", __func__);
4376                goto _exit0;
4377        }
4378
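        /* Prefer a 64-bit DMA mask and fall back to 32-bit if it is unavailable */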
4379        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4380                vxge_debug_ll_config(VXGE_TRACE,
4381                        "%s : using 64bit DMA", __func__);
4382
4383                high_dma = 1;
4384
4385                if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4386                        vxge_debug_init(VXGE_ERR,
4387                                "%s : unable to obtain 64bit DMA for "
4388                                "consistent allocations", __func__);
4389                        ret = -ENOMEM;
4390                        goto _exit1;
4391                }
4392        } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
4393                vxge_debug_ll_config(VXGE_TRACE,
4394                        "%s : using 32bit DMA", __func__);
4395        } else {
4396                ret = -ENOMEM;
4397                goto _exit1;
4398        }
4399
4400        ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4401        if (ret) {
4402                vxge_debug_init(VXGE_ERR,
4403                        "%s : request regions failed", __func__);
4404                goto _exit1;
4405        }
4406
4407        pci_set_master(pdev);
4408
4409        attr.bar0 = pci_ioremap_bar(pdev, 0);
4410        if (!attr.bar0) {
4411                vxge_debug_init(VXGE_ERR,
4412                        "%s : cannot remap io memory bar0", __func__);
4413                ret = -ENODEV;
4414                goto _exit2;
4415        }
4416        vxge_debug_ll_config(VXGE_TRACE,
4417                "pci ioremap bar0: %p:0x%llx",
4418                attr.bar0,
4419                (unsigned long long)pci_resource_start(pdev, 0));
4420
4421        status = vxge_hw_device_hw_info_get(attr.bar0,
4422                        &ll_config->device_hw_info);
4423        if (status != VXGE_HW_OK) {
4424                vxge_debug_init(VXGE_ERR,
4425                        "%s: Reading of hardware info failed. "
4426                        "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4427                ret = -EINVAL;
4428                goto _exit3;
4429        }
4430
4431        vpath_mask = ll_config->device_hw_info.vpath_mask;
4432        if (vpath_mask == 0) {
4433                vxge_debug_ll_config(VXGE_TRACE,
4434                        "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4435                ret = -EINVAL;
4436                goto _exit3;
4437        }
4438
4439        vxge_debug_ll_config(VXGE_TRACE,
4440                "%s:%d  Vpath mask = %llx", __func__, __LINE__,
4441                (unsigned long long)vpath_mask);
4442
4443        function_mode = ll_config->device_hw_info.function_mode;
4444        host_type = ll_config->device_hw_info.host_type;
4445        is_privileged = __vxge_hw_device_is_privilaged(host_type,
4446                ll_config->device_hw_info.func_id);
4447
4448        /* Check how many vpaths are available */
4449        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4450                if (!((vpath_mask) & vxge_mBIT(i)))
4451                        continue;
4452                max_vpath_supported++;
4453        }
4454
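        /* One function is the PF itself, so it is excluded from the VF count */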
4455        if (new_device)
4456                num_vfs = vxge_get_num_vfs(function_mode) - 1;
4457
4458        /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4459        if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4460           (ll_config->intr_type != INTA)) {
4461                ret = pci_enable_sriov(pdev, num_vfs);
4462                if (ret)
4463                        vxge_debug_ll_config(VXGE_ERR,
4464                                "Failed in enabling SRIOV mode: %d\n", ret);
4465                        /* No need to fail out, as an error here is non-fatal */
4466        }
4467
4468        /*
4469         * Configure the vpaths and get the number of vpaths actually
4470         * configured, which is at most the maximum vpaths per function.
4471         */
4472        no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4473        if (!no_of_vpath) {
4474                vxge_debug_ll_config(VXGE_ERR,
4475                        "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4476                ret = 0;
4477                goto _exit3;
4478        }
4479
4480        /* Setting driver callbacks */
4481        attr.uld_callbacks = &vxge_callbacks;
4482
4483        status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4484        if (status != VXGE_HW_OK) {
4485                vxge_debug_init(VXGE_ERR,
4486                        "Failed to initialize device (%d)", status);
4487                ret = -EINVAL;
4488                goto _exit3;
4489        }
4490
4491        if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4492                        ll_config->device_hw_info.fw_version.minor,
4493                        ll_config->device_hw_info.fw_version.build) >=
4494            VXGE_EPROM_FW_VER) {
4495                struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4496
4497                status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4498                if (status != VXGE_HW_OK) {
4499                        vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4500                                        VXGE_DRIVER_NAME);
4501                        /* This is a non-fatal error, continue */
4502                }
4503
4504                for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4505                        hldev->eprom_versions[i] = img[i].version;
4506                        if (!img[i].is_valid)
4507                                break;
4508                        vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4509                                        "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4510                                        VXGE_EPROM_IMG_MAJOR(img[i].version),
4511                                        VXGE_EPROM_IMG_MINOR(img[i].version),
4512                                        VXGE_EPROM_IMG_FIX(img[i].version),
4513                                        VXGE_EPROM_IMG_BUILD(img[i].version));
4514                }
4515        }
4516
4517        /* If FCS stripping is not disabled in the MAC, fail the driver load */
4518        status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4519        if (status != VXGE_HW_OK) {
4520                vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
4521                                " failing driver load", VXGE_DRIVER_NAME);
4522                ret = -EINVAL;
4523                goto _exit4;
4524        }
4525
4526        /* Always enable HWTS.  This will always cause the FCS to be invalid,
4527         * because HWTS uses the FCS field as the location of the
4528         * timestamp.  The HW FCS checking will still correctly determine if
4529         * there is a valid checksum, and the FCS is being removed by the driver
4530         * anyway.  So no functionality is being lost.  Since it is always
4531         * enabled, we now simply use the ioctl call to set whether or not the
4532         * driver should be paying attention to the HWTS.
4533         */
4534        if (is_privileged == VXGE_HW_OK) {
4535                status = vxge_timestamp_config(hldev);
4536                if (status != VXGE_HW_OK) {
4537                        vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4538                                        VXGE_DRIVER_NAME);
4539                        ret = -EFAULT;
4540                        goto _exit4;
4541                }
4542        }
4543
4544        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4545
4546        /* set private device info */
4547        pci_set_drvdata(pdev, hldev);
4548
4549        ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4550        ll_config->addr_learn_en = addr_learn_en;
4551        ll_config->rth_algorithm = RTH_ALG_JENKINS;
4552        ll_config->rth_hash_type_tcpipv4 = 1;
4553        ll_config->rth_hash_type_ipv4 = 0;
4554        ll_config->rth_hash_type_tcpipv6 = 0;
4555        ll_config->rth_hash_type_ipv6 = 0;
4556        ll_config->rth_hash_type_tcpipv6ex = 0;
4557        ll_config->rth_hash_type_ipv6ex = 0;
4558        ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4559        ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4560        ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4561
4562        ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4563                                   &vdev);
4564        if (ret) {
4565                ret = -EINVAL;
4566                goto _exit4;
4567        }
4568
4569        ret = vxge_probe_fw_update(vdev);
4570        if (ret)
4571                goto _exit5;
4572
4573        vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4574        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4575                vxge_hw_device_trace_level_get(hldev));
4576
4577        /* set private HW device info */
4578        vdev->mtu = VXGE_HW_DEFAULT_MTU;
4579        vdev->bar0 = attr.bar0;
4580        vdev->max_vpath_supported = max_vpath_supported;
4581        vdev->no_of_vpath = no_of_vpath;
4582
4583        /* Virtual Path count */
4584        for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4585                if (!vxge_bVALn(vpath_mask, i, 1))
4586                        continue;
4587                if (j >= vdev->no_of_vpath)
4588                        break;
4589
4590                vdev->vpaths[j].is_configured = 1;
4591                vdev->vpaths[j].device_id = i;
4592                vdev->vpaths[j].ring.driver_id = j;
4593                vdev->vpaths[j].vdev = vdev;
4594                vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4595                memcpy((u8 *)vdev->vpaths[j].macaddr,
4596                                ll_config->device_hw_info.mac_addrs[i],
4597                                ETH_ALEN);
4598
4599                /* Initialize the mac address list header */
4600                INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4601
4602                vdev->vpaths[j].mac_addr_cnt = 0;
4603                vdev->vpaths[j].mcast_addr_cnt = 0;
4604                j++;
4605        }
4606        vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4607        vdev->max_config_port = max_config_port;
4608
4609        vdev->vlan_tag_strip = vlan_tag_strip;
4610
4611        /* map the hashing selector table to the configured vpaths */
4612        for (i = 0; i < vdev->no_of_vpath; i++)
4613                vdev->vpath_selector[i] = vpath_selector[i];
4614
4615        macaddr = (u8 *)vdev->vpaths[0].macaddr;
4616
4617        ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4618        ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4619        ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4620
4621        vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4622                vdev->ndev->name, ll_config->device_hw_info.serial_number);
4623
4624        vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4625                vdev->ndev->name, ll_config->device_hw_info.part_number);
4626
4627        vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4628                vdev->ndev->name, ll_config->device_hw_info.product_desc);
4629
4630        vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4631                vdev->ndev->name, macaddr);
4632
4633        vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4634                vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4635
4636        vxge_debug_init(VXGE_TRACE,
4637                "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4638                ll_config->device_hw_info.fw_version.version,
4639                ll_config->device_hw_info.fw_date.date);
4640
4641        if (new_device) {
4642                switch (ll_config->device_hw_info.function_mode) {
4643                case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4644                        vxge_debug_init(VXGE_TRACE,
4645                        "%s: Single Function Mode Enabled", vdev->ndev->name);
4646                break;
4647                case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4648                        vxge_debug_init(VXGE_TRACE,
4649                        "%s: Multi Function Mode Enabled", vdev->ndev->name);
4650                break;
4651                case VXGE_HW_FUNCTION_MODE_SRIOV:
4652                        vxge_debug_init(VXGE_TRACE,
4653                        "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4654                break;
4655                case VXGE_HW_FUNCTION_MODE_MRIOV:
4656                        vxge_debug_init(VXGE_TRACE,
4657                        "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4658                break;
4659                }
4660        }
4661
4662        vxge_print_parm(vdev, vpath_mask);
4663
4664        /* Store the fw version for ethtool option */
4665        strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4666        memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4667
4668        /* Copy the station mac address to the list */
4669        for (i = 0; i < vdev->no_of_vpath; i++) {
4670                entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4671                if (NULL == entry) {
4672                        vxge_debug_init(VXGE_ERR,
4673                                "%s: mac_addr_list : memory allocation failed",
4674                                vdev->ndev->name);
4675                        ret = -ENOMEM;
4676                        goto _exit6;
4677                }
4678                macaddr = (u8 *)&entry->macaddr;
4679                memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4680                list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4681                vdev->vpaths[i].mac_addr_cnt = 1;
4682        }
4683
4684        kfree(device_config);
4685
4686        /*
4687         * INTA is shared in multi-function mode. This is unlike the INTA
4688         * implementation in MR mode, where each VH has its own INTA message.
4689         * - INTA is masked (disabled) as long as at least one function sets
4690         * its TITAN_MASK_ALL_INT.ALARM bit.
4691         * - INTA is unmasked (enabled) when all enabled functions have cleared
4692         * their own TITAN_MASK_ALL_INT.ALARM bit.
4693         * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4694         * Though this driver leaves the top level interrupts unmasked while
4695         * leaving the required module interrupt bits masked on exit, there
4696         * could be a rogue driver around that does not follow this procedure
4697         * resulting in a failure to generate interrupts. The following code is
4698         * present to prevent such a failure.
4699         */
4700
4701        if (ll_config->device_hw_info.function_mode ==
4702                VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4703                if (vdev->config.intr_type == INTA)
4704                        vxge_hw_device_unmask_all(hldev);
4705
4706        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
4707                vdev->ndev->name, __func__, __LINE__);
4708
4709        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4710        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4711                vxge_hw_device_trace_level_get(hldev));
4712
4713        kfree(ll_config);
4714        return 0;
4715
4716_exit6:
4717        for (i = 0; i < vdev->no_of_vpath; i++)
4718                vxge_free_mac_add_list(&vdev->vpaths[i]);
4719_exit5:
4720        vxge_device_unregister(hldev);
4721_exit4:
4722        vxge_hw_device_terminate(hldev);
4723        pci_disable_sriov(pdev);
4724_exit3:
4725        iounmap(attr.bar0);
4726_exit2:
4727        pci_release_region(pdev, 0);
4728_exit1:
4729        pci_disable_device(pdev);
4730_exit0:
4731        kfree(ll_config);
4732        kfree(device_config);
4733        driver_config->config_dev_cnt--;
4734        driver_config->total_dev_cnt--;
4735        return ret;
4736}
4737
4738/**
4739 * vxge_remove - Free the PCI device
4740 * @pdev: structure containing the PCI related information of the device.
4741 * Description: This function is called by the PCI subsystem to release a
4742 * PCI device and free up all resources held by the device.
4743 */
4744static void vxge_remove(struct pci_dev *pdev)
4745{
4746        struct __vxge_hw_device *hldev;
4747        struct vxgedev *vdev;
4748        int i;
4749
4750        hldev = pci_get_drvdata(pdev);
4751        if (hldev == NULL)
4752                return;
4753
4754        vdev = netdev_priv(hldev->ndev);
4755
4756        vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4757        vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4758                        __func__);
4759
4760        for (i = 0; i < vdev->no_of_vpath; i++)
4761                vxge_free_mac_add_list(&vdev->vpaths[i]);
4762
4763        vxge_device_unregister(hldev);
4764        /* Do not call pci_disable_sriov here, as it will break child devices */
4765        vxge_hw_device_terminate(hldev);
4766        iounmap(vdev->bar0);
4767        pci_release_region(pdev, 0);
4768        pci_disable_device(pdev);
4769        driver_config->config_dev_cnt--;
4770        driver_config->total_dev_cnt--;
4771
4772        vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4773                        __func__, __LINE__);
4774        vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
4775                             __LINE__);
4776}
4777
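/* PCI error recovery entry points; see the vxge_io_* handlers above */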
4778static const struct pci_error_handlers vxge_err_handler = {
4779        .error_detected = vxge_io_error_detected,
4780        .slot_reset = vxge_io_slot_reset,
4781        .resume = vxge_io_resume,
4782};
4783
4784static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
4785
4786static struct pci_driver vxge_driver = {
4787        .name = VXGE_DRIVER_NAME,
4788        .id_table = vxge_id_table,
4789        .probe = vxge_probe,
4790        .remove = vxge_remove,
4791        .driver.pm = &vxge_pm_ops,
4792        .err_handler = &vxge_err_handler,
4793};
4794
4795static int __init
4796vxge_starter(void)
4797{
4798        int ret = 0;
4799
4800        pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4801        pr_info("Driver version: %s\n", DRV_VERSION);
4802
4803        verify_bandwidth();
4804
4805        driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4806        if (!driver_config)
4807                return -ENOMEM;
4808
4809        ret = pci_register_driver(&vxge_driver);
4810        if (ret) {
4811                kfree(driver_config);
4812                goto err;
4813        }
4814
4815        if (driver_config->config_dev_cnt &&
4816           (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4817                vxge_debug_init(VXGE_ERR,
4818                        "%s: Configured %d of %d devices",
4819                        VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4820                        driver_config->total_dev_cnt);
4821err:
4822        return ret;
4823}
4824
4825static void __exit
4826vxge_closer(void)
4827{
4828        pci_unregister_driver(&vxge_driver);
4829        kfree(driver_config);
4830}
4831module_init(vxge_starter);
4832module_exit(vxge_closer);
4833