linux/drivers/net/ethernet/broadcom/cnic.c
   1/* cnic.c: QLogic CNIC core network driver.
   2 *
   3 * Copyright (c) 2006-2014 Broadcom Corporation
   4 * Copyright (c) 2014 QLogic Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 *
  10 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
  11 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
  12 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/module.h>
  18
  19#include <linux/kernel.h>
  20#include <linux/errno.h>
  21#include <linux/list.h>
  22#include <linux/slab.h>
  23#include <linux/pci.h>
  24#include <linux/init.h>
  25#include <linux/netdevice.h>
  26#include <linux/uio_driver.h>
  27#include <linux/in.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/delay.h>
  30#include <linux/ethtool.h>
  31#include <linux/if_vlan.h>
  32#include <linux/prefetch.h>
  33#include <linux/random.h>
  34#if IS_ENABLED(CONFIG_VLAN_8021Q)
  35#define BCM_VLAN 1
  36#endif
  37#include <net/ip.h>
  38#include <net/tcp.h>
  39#include <net/route.h>
  40#include <net/ipv6.h>
  41#include <net/ip6_route.h>
  42#include <net/ip6_checksum.h>
  43#include <scsi/iscsi_if.h>
  44
  45#define BCM_CNIC        1
  46#include "cnic_if.h"
  47#include "bnx2.h"
  48#include "bnx2x/bnx2x.h"
  49#include "bnx2x/bnx2x_reg.h"
  50#include "bnx2x/bnx2x_fw_defs.h"
  51#include "bnx2x/bnx2x_hsi.h"
  52#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
  53#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
  54#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
  55#include "cnic.h"
  56#include "cnic_defs.h"
  57
  58#define CNIC_MODULE_NAME        "cnic"
  59
  60static char version[] =
  61        "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
  62
  63MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
   64              "Chen (zongxi@broadcom.com)");
  65MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
  66MODULE_LICENSE("GPL");
  67MODULE_VERSION(CNIC_MODULE_VERSION);
  68
  69/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
  70static LIST_HEAD(cnic_dev_list);
  71static LIST_HEAD(cnic_udev_list);
  72static DEFINE_RWLOCK(cnic_dev_lock);
  73static DEFINE_MUTEX(cnic_lock);
  74
  75static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
  76
  77/* helper function, assuming cnic_lock is held */
  78static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
  79{
  80        return rcu_dereference_protected(cnic_ulp_tbl[type],
  81                                         lockdep_is_held(&cnic_lock));
  82}
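
/*
 * Illustrative sketch (not part of this file): an upper-layer protocol
 * driver such as bnx2i or bnx2fc fills in a struct cnic_ulp_ops and claims
 * its slot in cnic_ulp_tbl[], e.g. (my_ulp_ops is a hypothetical name):
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 *
 * Writers publish under cnic_lock with rcu_assign_pointer(); fast-path
 * readers use plain RCU, which is why the table entries are marked __rcu.
 */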
  83
  84static int cnic_service_bnx2(void *, void *);
  85static int cnic_service_bnx2x(void *, void *);
  86static int cnic_ctl(void *, struct cnic_ctl_info *);
  87
  88static struct cnic_ops cnic_bnx2_ops = {
  89        .cnic_owner     = THIS_MODULE,
  90        .cnic_handler   = cnic_service_bnx2,
  91        .cnic_ctl       = cnic_ctl,
  92};
  93
  94static struct cnic_ops cnic_bnx2x_ops = {
  95        .cnic_owner     = THIS_MODULE,
  96        .cnic_handler   = cnic_service_bnx2x,
  97        .cnic_ctl       = cnic_ctl,
  98};
  99
 100static struct workqueue_struct *cnic_wq;
 101
 102static void cnic_shutdown_rings(struct cnic_dev *);
 103static void cnic_init_rings(struct cnic_dev *);
 104static int cnic_cm_set_pg(struct cnic_sock *);
 105
 106static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
 107{
 108        struct cnic_uio_dev *udev = uinfo->priv;
 109        struct cnic_dev *dev;
 110
 111        if (!capable(CAP_NET_ADMIN))
 112                return -EPERM;
 113
 114        if (udev->uio_dev != -1)
 115                return -EBUSY;
 116
 117        rtnl_lock();
 118        dev = udev->dev;
 119
 120        if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
 121                rtnl_unlock();
 122                return -ENODEV;
 123        }
 124
 125        udev->uio_dev = iminor(inode);
 126
 127        cnic_shutdown_rings(dev);
 128        cnic_init_rings(dev);
 129        rtnl_unlock();
 130
 131        return 0;
 132}
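
/*
 * Opening the UIO device requires CAP_NET_ADMIN and only one opener is
 * allowed at a time (udev->uio_dev doubles as a busy flag).  Each
 * successful open shuts down and re-initializes the L2 rings under rtnl,
 * so the userspace consumer (in practice the iscsiuio daemon) always
 * starts from a clean ring state.
 */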
 133
 134static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
 135{
 136        struct cnic_uio_dev *udev = uinfo->priv;
 137
 138        udev->uio_dev = -1;
 139        return 0;
 140}
 141
 142static inline void cnic_hold(struct cnic_dev *dev)
 143{
 144        atomic_inc(&dev->ref_count);
 145}
 146
 147static inline void cnic_put(struct cnic_dev *dev)
 148{
 149        atomic_dec(&dev->ref_count);
 150}
 151
 152static inline void csk_hold(struct cnic_sock *csk)
 153{
 154        atomic_inc(&csk->ref_count);
 155}
 156
 157static inline void csk_put(struct cnic_sock *csk)
 158{
 159        atomic_dec(&csk->ref_count);
 160}
 161
 162static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 163{
 164        struct cnic_dev *cdev;
 165
 166        read_lock(&cnic_dev_lock);
 167        list_for_each_entry(cdev, &cnic_dev_list, list) {
 168                if (netdev == cdev->netdev) {
 169                        cnic_hold(cdev);
 170                        read_unlock(&cnic_dev_lock);
 171                        return cdev;
 172                }
 173        }
 174        read_unlock(&cnic_dev_lock);
 175        return NULL;
 176}
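
/*
 * cnic_from_netdev() returns the matching device with its reference count
 * already elevated; a caller that receives a non-NULL pointer is expected
 * to release it with cnic_put() when done.
 */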
 177
 178static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
 179{
 180        atomic_inc(&ulp_ops->ref_count);
 181}
 182
 183static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
 184{
 185        atomic_dec(&ulp_ops->ref_count);
 186}
 187
 188static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 189{
 190        struct cnic_local *cp = dev->cnic_priv;
 191        struct cnic_eth_dev *ethdev = cp->ethdev;
 192        struct drv_ctl_info info;
 193        struct drv_ctl_io *io = &info.data.io;
 194
 195        info.cmd = DRV_CTL_CTX_WR_CMD;
 196        io->cid_addr = cid_addr;
 197        io->offset = off;
 198        io->data = val;
 199        ethdev->drv_ctl(dev->netdev, &info);
 200}
 201
 202static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
 203{
 204        struct cnic_local *cp = dev->cnic_priv;
 205        struct cnic_eth_dev *ethdev = cp->ethdev;
 206        struct drv_ctl_info info;
 207        struct drv_ctl_io *io = &info.data.io;
 208
 209        info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 210        io->offset = off;
 211        io->dma_addr = addr;
 212        ethdev->drv_ctl(dev->netdev, &info);
 213}
 214
 215static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
 216{
 217        struct cnic_local *cp = dev->cnic_priv;
 218        struct cnic_eth_dev *ethdev = cp->ethdev;
 219        struct drv_ctl_info info;
 220        struct drv_ctl_l2_ring *ring = &info.data.ring;
 221
 222        if (start)
 223                info.cmd = DRV_CTL_START_L2_CMD;
 224        else
 225                info.cmd = DRV_CTL_STOP_L2_CMD;
 226
 227        ring->cid = cid;
 228        ring->client_id = cl_id;
 229        ethdev->drv_ctl(dev->netdev, &info);
 230}
 231
 232static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
 233{
 234        struct cnic_local *cp = dev->cnic_priv;
 235        struct cnic_eth_dev *ethdev = cp->ethdev;
 236        struct drv_ctl_info info;
 237        struct drv_ctl_io *io = &info.data.io;
 238
 239        info.cmd = DRV_CTL_IO_WR_CMD;
 240        io->offset = off;
 241        io->data = val;
 242        ethdev->drv_ctl(dev->netdev, &info);
 243}
 244
 245static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
 246{
 247        struct cnic_local *cp = dev->cnic_priv;
 248        struct cnic_eth_dev *ethdev = cp->ethdev;
 249        struct drv_ctl_info info;
 250        struct drv_ctl_io *io = &info.data.io;
 251
 252        info.cmd = DRV_CTL_IO_RD_CMD;
 253        io->offset = off;
 254        ethdev->drv_ctl(dev->netdev, &info);
 255        return io->data;
 256}
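
/*
 * None of the helpers above touch the hardware directly: context writes,
 * L2 ring start/stop and indirect register accesses are all funneled
 * through the owning ethernet driver's drv_ctl() callback, so bnx2/bnx2x
 * remains the single owner of those registers.
 */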
 257
 258static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
 259{
 260        struct cnic_local *cp = dev->cnic_priv;
 261        struct cnic_eth_dev *ethdev = cp->ethdev;
 262        struct drv_ctl_info info;
 263        struct fcoe_capabilities *fcoe_cap =
 264                &info.data.register_data.fcoe_features;
 265
 266        if (reg) {
 267                info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 268                if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
 269                        memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
 270        } else {
 271                info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
 272        }
 273
 274        info.data.ulp_type = ulp_type;
 275        ethdev->drv_ctl(dev->netdev, &info);
 276}
 277
 278static int cnic_in_use(struct cnic_sock *csk)
 279{
 280        return test_bit(SK_F_INUSE, &csk->flags);
 281}
 282
 283static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
 284{
 285        struct cnic_local *cp = dev->cnic_priv;
 286        struct cnic_eth_dev *ethdev = cp->ethdev;
 287        struct drv_ctl_info info;
 288
 289        info.cmd = cmd;
 290        info.data.credit.credit_count = count;
 291        ethdev->drv_ctl(dev->netdev, &info);
 292}
 293
 294static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
 295{
 296        u32 i;
 297
 298        if (!cp->ctx_tbl)
 299                return -EINVAL;
 300
 301        for (i = 0; i < cp->max_cid_space; i++) {
 302                if (cp->ctx_tbl[i].cid == cid) {
 303                        *l5_cid = i;
 304                        return 0;
 305                }
 306        }
 307        return -EINVAL;
 308}
 309
 310static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 311                           struct cnic_sock *csk)
 312{
 313        struct iscsi_path path_req;
 314        char *buf = NULL;
 315        u16 len = 0;
 316        u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 317        struct cnic_ulp_ops *ulp_ops;
 318        struct cnic_uio_dev *udev = cp->udev;
 319        int rc = 0, retry = 0;
 320
 321        if (!udev || udev->uio_dev == -1)
 322                return -ENODEV;
 323
 324        if (csk) {
 325                len = sizeof(path_req);
 326                buf = (char *) &path_req;
 327                memset(&path_req, 0, len);
 328
 329                msg_type = ISCSI_KEVENT_PATH_REQ;
 330                path_req.handle = (u64) csk->l5_cid;
 331                if (test_bit(SK_F_IPV6, &csk->flags)) {
 332                        memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
 333                               sizeof(struct in6_addr));
 334                        path_req.ip_addr_len = 16;
 335                } else {
 336                        memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
 337                               sizeof(struct in_addr));
 338                        path_req.ip_addr_len = 4;
 339                }
 340                path_req.vlan_id = csk->vlan_id;
 341                path_req.pmtu = csk->mtu;
 342        }
 343
 344        while (retry < 3) {
 345                rc = 0;
 346                rcu_read_lock();
 347                ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 348                if (ulp_ops)
 349                        rc = ulp_ops->iscsi_nl_send_msg(
 350                                cp->ulp_handle[CNIC_ULP_ISCSI],
 351                                msg_type, buf, len);
 352                rcu_read_unlock();
 353                if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
 354                        break;
 355
 356                msleep(100);
 357                retry++;
 358        }
 359        return rc;
 360}
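
/*
 * Delivery is best effort: ISCSI_KEVENT_PATH_REQ is retried up to three
 * times with a 100 ms delay between attempts (e.g. when the iSCSI ULP's
 * netlink handler is not ready yet), while ISCSI_KEVENT_IF_DOWN is sent
 * only once.
 */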
 361
 362static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
 363
 364static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 365                                  char *buf, u16 len)
 366{
 367        int rc = -EINVAL;
 368
 369        switch (msg_type) {
 370        case ISCSI_UEVENT_PATH_UPDATE: {
 371                struct cnic_local *cp;
 372                u32 l5_cid;
 373                struct cnic_sock *csk;
 374                struct iscsi_path *path_resp;
 375
 376                if (len < sizeof(*path_resp))
 377                        break;
 378
 379                path_resp = (struct iscsi_path *) buf;
 380                cp = dev->cnic_priv;
 381                l5_cid = (u32) path_resp->handle;
 382                if (l5_cid >= MAX_CM_SK_TBL_SZ)
 383                        break;
 384
 385                rcu_read_lock();
 386                if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
 387                        rc = -ENODEV;
 388                        rcu_read_unlock();
 389                        break;
 390                }
 391                csk = &cp->csk_tbl[l5_cid];
 392                csk_hold(csk);
 393                if (cnic_in_use(csk) &&
 394                    test_bit(SK_F_CONNECT_START, &csk->flags)) {
 395
 396                        csk->vlan_id = path_resp->vlan_id;
 397
 398                        memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
 399                        if (test_bit(SK_F_IPV6, &csk->flags))
 400                                memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
 401                                       sizeof(struct in6_addr));
 402                        else
 403                                memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 404                                       sizeof(struct in_addr));
 405
 406                        if (is_valid_ether_addr(csk->ha)) {
 407                                cnic_cm_set_pg(csk);
 408                        } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
 409                                !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 410
 411                                cnic_cm_upcall(cp, csk,
 412                                        L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
 413                                clear_bit(SK_F_CONNECT_START, &csk->flags);
 414                        }
 415                }
 416                csk_put(csk);
 417                rcu_read_unlock();
 418                rc = 0;
 419        }
 420        }
 421
 422        return rc;
 423}
 424
 425static int cnic_offld_prep(struct cnic_sock *csk)
 426{
 427        if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 428                return 0;
 429
 430        if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
 431                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 432                return 0;
 433        }
 434
 435        return 1;
 436}
 437
 438static int cnic_close_prep(struct cnic_sock *csk)
 439{
 440        clear_bit(SK_F_CONNECT_START, &csk->flags);
 441        smp_mb__after_atomic();
 442
 443        if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 444                while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 445                        msleep(1);
 446
 447                return 1;
 448        }
 449        return 0;
 450}
 451
 452static int cnic_abort_prep(struct cnic_sock *csk)
 453{
 454        clear_bit(SK_F_CONNECT_START, &csk->flags);
 455        smp_mb__after_atomic();
 456
 457        while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 458                msleep(1);
 459
 460        if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 461                csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
 462                return 1;
 463        }
 464
 465        return 0;
 466}
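
/*
 * Close and abort both clear SK_F_CONNECT_START and then claim
 * SK_F_OFFLD_SCHED (busy-waiting in 1 ms steps) so they cannot race with a
 * connect offload in progress.  cnic_close_prep() only does this for
 * connections whose offload has completed; cnic_abort_prep() does it
 * unconditionally and, if the offload had completed, records the
 * RESET_COMP state.
 */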
 467
 468int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 469{
 470        struct cnic_dev *dev;
 471
 472        if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 473                pr_err("%s: Bad type %d\n", __func__, ulp_type);
 474                return -EINVAL;
 475        }
 476        mutex_lock(&cnic_lock);
 477        if (cnic_ulp_tbl_prot(ulp_type)) {
 478                pr_err("%s: Type %d has already been registered\n",
 479                       __func__, ulp_type);
 480                mutex_unlock(&cnic_lock);
 481                return -EBUSY;
 482        }
 483
 484        read_lock(&cnic_dev_lock);
 485        list_for_each_entry(dev, &cnic_dev_list, list) {
 486                struct cnic_local *cp = dev->cnic_priv;
 487
 488                clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
 489        }
 490        read_unlock(&cnic_dev_lock);
 491
 492        atomic_set(&ulp_ops->ref_count, 0);
 493        rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 494        mutex_unlock(&cnic_lock);
 495
 496        /* Prevent race conditions with netdev_event */
 497        rtnl_lock();
 498        list_for_each_entry(dev, &cnic_dev_list, list) {
 499                struct cnic_local *cp = dev->cnic_priv;
 500
 501                if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 502                        ulp_ops->cnic_init(dev);
 503        }
 504        rtnl_unlock();
 505
 506        return 0;
 507}
 508
 509int cnic_unregister_driver(int ulp_type)
 510{
 511        struct cnic_dev *dev;
 512        struct cnic_ulp_ops *ulp_ops;
 513        int i = 0;
 514
 515        if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 516                pr_err("%s: Bad type %d\n", __func__, ulp_type);
 517                return -EINVAL;
 518        }
 519        mutex_lock(&cnic_lock);
 520        ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 521        if (!ulp_ops) {
 522                pr_err("%s: Type %d has not been registered\n",
 523                       __func__, ulp_type);
 524                goto out_unlock;
 525        }
 526        read_lock(&cnic_dev_lock);
 527        list_for_each_entry(dev, &cnic_dev_list, list) {
 528                struct cnic_local *cp = dev->cnic_priv;
 529
 530                if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 531                        pr_err("%s: Type %d still has devices registered\n",
 532                               __func__, ulp_type);
 533                        read_unlock(&cnic_dev_lock);
 534                        goto out_unlock;
 535                }
 536        }
 537        read_unlock(&cnic_dev_lock);
 538
 539        RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 540
 541        mutex_unlock(&cnic_lock);
 542        synchronize_rcu();
 543        while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
 544                msleep(100);
 545                i++;
 546        }
 547
 548        if (atomic_read(&ulp_ops->ref_count) != 0)
 549                pr_warn("%s: Failed waiting for ref count to go to zero\n",
 550                        __func__);
 551        return 0;
 552
 553out_unlock:
 554        mutex_unlock(&cnic_lock);
 555        return -EINVAL;
 556}
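
/*
 * Unregistration publishes the NULL pointer first, then waits after
 * synchronize_rcu() for the ULP's reference count to drain (up to
 * 20 x 100 ms).  A leftover reference only triggers a warning; the
 * unregister still succeeds.
 */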
 557
 558static int cnic_start_hw(struct cnic_dev *);
 559static void cnic_stop_hw(struct cnic_dev *);
 560
 561static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 562                                void *ulp_ctx)
 563{
 564        struct cnic_local *cp = dev->cnic_priv;
 565        struct cnic_ulp_ops *ulp_ops;
 566
 567        if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 568                pr_err("%s: Bad type %d\n", __func__, ulp_type);
 569                return -EINVAL;
 570        }
 571        mutex_lock(&cnic_lock);
 572        if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 573                pr_err("%s: Driver with type %d has not been registered\n",
 574                       __func__, ulp_type);
 575                mutex_unlock(&cnic_lock);
 576                return -EAGAIN;
 577        }
 578        if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 579                pr_err("%s: Type %d has already been registered to this device\n",
 580                       __func__, ulp_type);
 581                mutex_unlock(&cnic_lock);
 582                return -EBUSY;
 583        }
 584
 585        clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 586        cp->ulp_handle[ulp_type] = ulp_ctx;
 587        ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 588        rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 589        cnic_hold(dev);
 590
 591        if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
 592                if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
 593                        ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
 594
 595        mutex_unlock(&cnic_lock);
 596
 597        cnic_ulp_ctl(dev, ulp_type, true);
 598
 599        return 0;
 600
 601}
 602EXPORT_SYMBOL(cnic_register_driver);
 603
 604static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 605{
 606        struct cnic_local *cp = dev->cnic_priv;
 607        int i = 0;
 608
 609        if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
 610                pr_err("%s: Bad type %d\n", __func__, ulp_type);
 611                return -EINVAL;
 612        }
 613
 614        if (ulp_type == CNIC_ULP_ISCSI)
 615                cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 616
 617        mutex_lock(&cnic_lock);
 618        if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 619                RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 620                cnic_put(dev);
 621        } else {
 622                pr_err("%s: device not registered to this ulp type %d\n",
 623                       __func__, ulp_type);
 624                mutex_unlock(&cnic_lock);
 625                return -EINVAL;
 626        }
 627        mutex_unlock(&cnic_lock);
 628
 629        if (ulp_type == CNIC_ULP_FCOE)
 630                dev->fcoe_cap = NULL;
 631
 632        synchronize_rcu();
 633
 634        while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
 635               i < 20) {
 636                msleep(100);
 637                i++;
 638        }
 639        if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 640                netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 641
 642        cnic_ulp_ctl(dev, ulp_type, false);
 643
 644        return 0;
 645}
 646EXPORT_SYMBOL(cnic_unregister_driver);
 647
 648static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
 649                            u32 next)
 650{
 651        id_tbl->start = start_id;
 652        id_tbl->max = size;
 653        id_tbl->next = next;
 654        spin_lock_init(&id_tbl->lock);
 655        id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
 656        if (!id_tbl->table)
 657                return -ENOMEM;
 658
 659        return 0;
 660}
 661
 662static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
 663{
 664        kfree(id_tbl->table);
 665        id_tbl->table = NULL;
 666}
 667
 668static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
 669{
 670        int ret = -1;
 671
 672        id -= id_tbl->start;
 673        if (id >= id_tbl->max)
 674                return ret;
 675
 676        spin_lock(&id_tbl->lock);
 677        if (!test_bit(id, id_tbl->table)) {
 678                set_bit(id, id_tbl->table);
 679                ret = 0;
 680        }
 681        spin_unlock(&id_tbl->lock);
 682        return ret;
 683}
 684
 685/* Returns -1 if not successful */
 686static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
 687{
 688        u32 id;
 689
 690        spin_lock(&id_tbl->lock);
 691        id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
 692        if (id >= id_tbl->max) {
 693                id = -1;
 694                if (id_tbl->next != 0) {
 695                        id = find_first_zero_bit(id_tbl->table, id_tbl->next);
 696                        if (id >= id_tbl->next)
 697                                id = -1;
 698                }
 699        }
 700
 701        if (id < id_tbl->max) {
 702                set_bit(id, id_tbl->table);
 703                id_tbl->next = (id + 1) & (id_tbl->max - 1);
 704                id += id_tbl->start;
 705        }
 706
 707        spin_unlock(&id_tbl->lock);
 708
 709        return id;
 710}
 711
 712static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
 713{
 714        if (id == -1)
 715                return;
 716
 717        id -= id_tbl->start;
 718        if (id >= id_tbl->max)
 719                return;
 720
 721        clear_bit(id, id_tbl->table);
 722}
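
/*
 * The ID table is a simple bitmap allocator: cnic_alloc_new_id() scans for
 * a free bit starting at ->next and wraps the next pointer with the
 * (max - 1) mask (a clean round-robin when max is a power of two), while
 * cnic_alloc_id() reserves a specific ID and cnic_free_id() releases one.
 * -1 is used as the "no ID" value throughout.
 */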
 723
 724static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 725{
 726        int i;
 727
 728        if (!dma->pg_arr)
 729                return;
 730
 731        for (i = 0; i < dma->num_pages; i++) {
 732                if (dma->pg_arr[i]) {
 733                        dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 734                                          dma->pg_arr[i], dma->pg_map_arr[i]);
 735                        dma->pg_arr[i] = NULL;
 736                }
 737        }
 738        if (dma->pgtbl) {
 739                dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 740                                  dma->pgtbl, dma->pgtbl_map);
 741                dma->pgtbl = NULL;
 742        }
 743        kfree(dma->pg_arr);
 744        dma->pg_arr = NULL;
 745        dma->num_pages = 0;
 746}
 747
 748static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 749{
 750        int i;
 751        __le32 *page_table = (__le32 *) dma->pgtbl;
 752
 753        for (i = 0; i < dma->num_pages; i++) {
 754                /* Each entry needs to be in big endian format. */
 755                *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 756                page_table++;
 757                *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 758                page_table++;
 759        }
 760}
 761
 762static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 763{
 764        int i;
 765        __le32 *page_table = (__le32 *) dma->pgtbl;
 766
 767        for (i = 0; i < dma->num_pages; i++) {
 768                /* Each entry needs to be in little endian format. */
 769                *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 770                page_table++;
 771                *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 772                page_table++;
 773        }
 774}
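
/*
 * The two variants differ only in 32-bit word order: cnic_setup_page_tbl()
 * stores each 64-bit DMA address high word first, cnic_setup_page_tbl_le()
 * low word first; both halves are written as little-endian 32-bit values.
 * Which one is used is decided when cp->setup_pgtbl is assigned during
 * per-chip setup elsewhere in the driver.
 */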
 775
 776static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 777                          int pages, int use_pg_tbl)
 778{
 779        int i, size;
 780        struct cnic_local *cp = dev->cnic_priv;
 781
 782        size = pages * (sizeof(void *) + sizeof(dma_addr_t));
 783        dma->pg_arr = kzalloc(size, GFP_ATOMIC);
 784        if (dma->pg_arr == NULL)
 785                return -ENOMEM;
 786
 787        dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
 788        dma->num_pages = pages;
 789
 790        for (i = 0; i < pages; i++) {
 791                dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
 792                                                    CNIC_PAGE_SIZE,
 793                                                    &dma->pg_map_arr[i],
 794                                                    GFP_ATOMIC);
 795                if (dma->pg_arr[i] == NULL)
 796                        goto error;
 797        }
 798        if (!use_pg_tbl)
 799                return 0;
 800
 801        dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
 802                          ~(CNIC_PAGE_SIZE - 1);
 803        dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 804                                        &dma->pgtbl_map, GFP_ATOMIC);
 805        if (dma->pgtbl == NULL)
 806                goto error;
 807
 808        cp->setup_pgtbl(dev, dma);
 809
 810        return 0;
 811
 812error:
 813        cnic_free_dma(dev, dma);
 814        return -ENOMEM;
 815}
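
/*
 * cnic_alloc_dma() allocates 'pages' individually mapped coherent pages
 * (GFP_ATOMIC) and, when use_pg_tbl is set, a page table holding their bus
 * addresses in the format chosen by cp->setup_pgtbl.  On any failure,
 * everything allocated so far is released via cnic_free_dma().
 */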
 816
 817static void cnic_free_context(struct cnic_dev *dev)
 818{
 819        struct cnic_local *cp = dev->cnic_priv;
 820        int i;
 821
 822        for (i = 0; i < cp->ctx_blks; i++) {
 823                if (cp->ctx_arr[i].ctx) {
 824                        dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
 825                                          cp->ctx_arr[i].ctx,
 826                                          cp->ctx_arr[i].mapping);
 827                        cp->ctx_arr[i].ctx = NULL;
 828                }
 829        }
 830}
 831
 832static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 833{
 834        if (udev->l2_buf) {
 835                dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
 836                                  udev->l2_buf, udev->l2_buf_map);
 837                udev->l2_buf = NULL;
 838        }
 839
 840        if (udev->l2_ring) {
 841                dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
 842                                  udev->l2_ring, udev->l2_ring_map);
 843                udev->l2_ring = NULL;
 844        }
 845
 846}
 847
 848static void __cnic_free_uio(struct cnic_uio_dev *udev)
 849{
 850        uio_unregister_device(&udev->cnic_uinfo);
 851
 852        __cnic_free_uio_rings(udev);
 853
 854        pci_dev_put(udev->pdev);
 855        kfree(udev);
 856}
 857
 858static void cnic_free_uio(struct cnic_uio_dev *udev)
 859{
 860        if (!udev)
 861                return;
 862
 863        write_lock(&cnic_dev_lock);
 864        list_del_init(&udev->list);
 865        write_unlock(&cnic_dev_lock);
 866        __cnic_free_uio(udev);
 867}
 868
 869static void cnic_free_resc(struct cnic_dev *dev)
 870{
 871        struct cnic_local *cp = dev->cnic_priv;
 872        struct cnic_uio_dev *udev = cp->udev;
 873
 874        if (udev) {
 875                udev->dev = NULL;
 876                cp->udev = NULL;
 877                if (udev->uio_dev == -1)
 878                        __cnic_free_uio_rings(udev);
 879        }
 880
 881        cnic_free_context(dev);
 882        kfree(cp->ctx_arr);
 883        cp->ctx_arr = NULL;
 884        cp->ctx_blks = 0;
 885
 886        cnic_free_dma(dev, &cp->gbl_buf_info);
 887        cnic_free_dma(dev, &cp->kwq_info);
 888        cnic_free_dma(dev, &cp->kwq_16_data_info);
 889        cnic_free_dma(dev, &cp->kcq2.dma);
 890        cnic_free_dma(dev, &cp->kcq1.dma);
 891        kfree(cp->iscsi_tbl);
 892        cp->iscsi_tbl = NULL;
 893        kfree(cp->ctx_tbl);
 894        cp->ctx_tbl = NULL;
 895
 896        cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 897        cnic_free_id_tbl(&cp->cid_tbl);
 898}
 899
 900static int cnic_alloc_context(struct cnic_dev *dev)
 901{
 902        struct cnic_local *cp = dev->cnic_priv;
 903
 904        if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 905                int i, k, arr_size;
 906
 907                cp->ctx_blk_size = CNIC_PAGE_SIZE;
 908                cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 909                arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 910                           sizeof(struct cnic_ctx);
 911                cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
 912                if (cp->ctx_arr == NULL)
 913                        return -ENOMEM;
 914
 915                k = 0;
 916                for (i = 0; i < 2; i++) {
 917                        u32 j, reg, off, lo, hi;
 918
 919                        if (i == 0)
 920                                off = BNX2_PG_CTX_MAP;
 921                        else
 922                                off = BNX2_ISCSI_CTX_MAP;
 923
 924                        reg = cnic_reg_rd_ind(dev, off);
 925                        lo = reg >> 16;
 926                        hi = reg & 0xffff;
 927                        for (j = lo; j < hi; j += cp->cids_per_blk, k++)
 928                                cp->ctx_arr[k].cid = j;
 929                }
 930
 931                cp->ctx_blks = k;
 932                if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
 933                        cp->ctx_blks = 0;
 934                        return -ENOMEM;
 935                }
 936
 937                for (i = 0; i < cp->ctx_blks; i++) {
 938                        cp->ctx_arr[i].ctx =
 939                                dma_alloc_coherent(&dev->pcidev->dev,
 940                                                   CNIC_PAGE_SIZE,
 941                                                   &cp->ctx_arr[i].mapping,
 942                                                   GFP_KERNEL);
 943                        if (cp->ctx_arr[i].ctx == NULL)
 944                                return -ENOMEM;
 945                }
 946        }
 947        return 0;
 948}
 949
 950static u16 cnic_bnx2_next_idx(u16 idx)
 951{
 952        return idx + 1;
 953}
 954
 955static u16 cnic_bnx2_hw_idx(u16 idx)
 956{
 957        return idx;
 958}
 959
 960static u16 cnic_bnx2x_next_idx(u16 idx)
 961{
 962        idx++;
 963        if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 964                idx++;
 965
 966        return idx;
 967}
 968
 969static u16 cnic_bnx2x_hw_idx(u16 idx)
 970{
 971        if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
 972                idx++;
 973        return idx;
 974}
 975
 976static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
 977                          bool use_pg_tbl)
 978{
 979        int err, i, use_page_tbl = 0;
 980        struct kcqe **kcq;
 981
 982        if (use_pg_tbl)
 983                use_page_tbl = 1;
 984
 985        err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
 986        if (err)
 987                return err;
 988
 989        kcq = (struct kcqe **) info->dma.pg_arr;
 990        info->kcq = kcq;
 991
 992        info->next_idx = cnic_bnx2_next_idx;
 993        info->hw_idx = cnic_bnx2_hw_idx;
 994        if (use_pg_tbl)
 995                return 0;
 996
 997        info->next_idx = cnic_bnx2x_next_idx;
 998        info->hw_idx = cnic_bnx2x_hw_idx;
 999
1000        for (i = 0; i < KCQ_PAGE_CNT; i++) {
1001                struct bnx2x_bd_chain_next *next =
1002                        (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
1003                int j = i + 1;
1004
1005                if (j >= KCQ_PAGE_CNT)
1006                        j = 0;
1007                next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
1008                next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
1009        }
1010        return 0;
1011}
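
/*
 * Two KCQ layouts are supported: with a page table (use_pg_tbl) the queue
 * keeps the simple bnx2-style index helpers, otherwise the pages are
 * linked into a bnx2x BD chain (the last slot of each page holds the bus
 * address of the next page) and the bnx2x index helpers skip over that
 * chain element.
 */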
1012
1013static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1014{
1015        struct cnic_local *cp = udev->dev->cnic_priv;
1016
1017        if (udev->l2_ring)
1018                return 0;
1019
1020        udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1021        udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1022                                           &udev->l2_ring_map,
1023                                           GFP_KERNEL | __GFP_COMP);
1024        if (!udev->l2_ring)
1025                return -ENOMEM;
1026
1027        udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1028        udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1029        udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1030                                          &udev->l2_buf_map,
1031                                          GFP_KERNEL | __GFP_COMP);
1032        if (!udev->l2_buf) {
1033                __cnic_free_uio_rings(udev);
1034                return -ENOMEM;
1035        }
1036
1037        return 0;
1038
1039}
1040
1041static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1042{
1043        struct cnic_local *cp = dev->cnic_priv;
1044        struct cnic_uio_dev *udev;
1045
1046        list_for_each_entry(udev, &cnic_udev_list, list) {
1047                if (udev->pdev == dev->pcidev) {
1048                        udev->dev = dev;
1049                        if (__cnic_alloc_uio_rings(udev, pages)) {
1050                                udev->dev = NULL;
1051                                return -ENOMEM;
1052                        }
1053                        cp->udev = udev;
1054                        return 0;
1055                }
1056        }
1057
1058        udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059        if (!udev)
1060                return -ENOMEM;
1061
1062        udev->uio_dev = -1;
1063
1064        udev->dev = dev;
1065        udev->pdev = dev->pcidev;
1066
1067        if (__cnic_alloc_uio_rings(udev, pages))
1068                goto err_udev;
1069
1070        list_add(&udev->list, &cnic_udev_list);
1071
1072        pci_dev_get(udev->pdev);
1073
1074        cp->udev = udev;
1075
1076        return 0;
1077
1078 err_udev:
1079        kfree(udev);
1080        return -ENOMEM;
1081}
1082
1083static int cnic_init_uio(struct cnic_dev *dev)
1084{
1085        struct cnic_local *cp = dev->cnic_priv;
1086        struct cnic_uio_dev *udev = cp->udev;
1087        struct uio_info *uinfo;
1088        int ret = 0;
1089
1090        if (!udev)
1091                return -ENOMEM;
1092
1093        uinfo = &udev->cnic_uinfo;
1094
1095        uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
1096        uinfo->mem[0].internal_addr = dev->regview;
1097        uinfo->mem[0].memtype = UIO_MEM_PHYS;
1098
1099        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1100                uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1101                                                     TX_MAX_TSS_RINGS + 1);
1102                uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1103                                        CNIC_PAGE_MASK;
1104                if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1105                        uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1106                else
1107                        uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1108
1109                uinfo->name = "bnx2_cnic";
1110        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1111                uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1112
1113                uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1114                        CNIC_PAGE_MASK;
1115                uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1116
1117                uinfo->name = "bnx2x_cnic";
1118        }
1119
1120        uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1121
1122        uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1123        uinfo->mem[2].size = udev->l2_ring_size;
1124        uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1125
1126        uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1127        uinfo->mem[3].size = udev->l2_buf_size;
1128        uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1129
1130        uinfo->version = CNIC_MODULE_VERSION;
1131        uinfo->irq = UIO_IRQ_CUSTOM;
1132
1133        uinfo->open = cnic_uio_open;
1134        uinfo->release = cnic_uio_close;
1135
1136        if (udev->uio_dev == -1) {
1137                if (!uinfo->priv) {
1138                        uinfo->priv = udev;
1139
1140                        ret = uio_register_device(&udev->pdev->dev, uinfo);
1141                }
1142        } else {
1143                cnic_init_rings(dev);
1144        }
1145
1146        return ret;
1147}
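
/*
 * Memory map exposed to userspace: mem[0] is the BAR 0 register window,
 * mem[1] the status block, mem[2] the L2 ring and mem[3] the L2 buffer
 * area.  The irq is UIO_IRQ_CUSTOM because the interrupt stays owned by
 * the ethernet driver; userspace is presumably woken via uio_event_notify()
 * from code outside this section.
 */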
1148
1149static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1150{
1151        struct cnic_local *cp = dev->cnic_priv;
1152        int ret;
1153
1154        ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1155        if (ret)
1156                goto error;
1157        cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1158
1159        ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1160        if (ret)
1161                goto error;
1162
1163        ret = cnic_alloc_context(dev);
1164        if (ret)
1165                goto error;
1166
1167        ret = cnic_alloc_uio_rings(dev, 2);
1168        if (ret)
1169                goto error;
1170
1171        ret = cnic_init_uio(dev);
1172        if (ret)
1173                goto error;
1174
1175        return 0;
1176
1177error:
1178        cnic_free_resc(dev);
1179        return ret;
1180}
1181
1182static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1183{
1184        struct cnic_local *cp = dev->cnic_priv;
1185        struct bnx2x *bp = netdev_priv(dev->netdev);
1186        int ctx_blk_size = cp->ethdev->ctx_blk_size;
1187        int total_mem, blks, i;
1188
1189        total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1190        blks = total_mem / ctx_blk_size;
1191        if (total_mem % ctx_blk_size)
1192                blks++;
1193
1194        if (blks > cp->ethdev->ctx_tbl_len)
1195                return -ENOMEM;
1196
1197        cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1198        if (cp->ctx_arr == NULL)
1199                return -ENOMEM;
1200
1201        cp->ctx_blks = blks;
1202        cp->ctx_blk_size = ctx_blk_size;
1203        if (!CHIP_IS_E1(bp))
1204                cp->ctx_align = 0;
1205        else
1206                cp->ctx_align = ctx_blk_size;
1207
1208        cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1209
1210        for (i = 0; i < blks; i++) {
1211                cp->ctx_arr[i].ctx =
1212                        dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1213                                           &cp->ctx_arr[i].mapping,
1214                                           GFP_KERNEL);
1215                if (cp->ctx_arr[i].ctx == NULL)
1216                        return -ENOMEM;
1217
1218                if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1219                        if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1220                                cnic_free_context(dev);
1221                                cp->ctx_blk_size += cp->ctx_align;
1222                                i = -1;
1223                                continue;
1224                        }
1225                }
1226        }
1227        return 0;
1228}
1229
1230static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1231{
1232        struct cnic_local *cp = dev->cnic_priv;
1233        struct bnx2x *bp = netdev_priv(dev->netdev);
1234        struct cnic_eth_dev *ethdev = cp->ethdev;
1235        u32 start_cid = ethdev->starting_cid;
1236        int i, j, n, ret, pages;
1237        struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1238
1239        cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1240        cp->iscsi_start_cid = start_cid;
1241        cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1242
1243        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
1244                cp->max_cid_space += dev->max_fcoe_conn;
1245                cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1246                if (!cp->fcoe_init_cid)
1247                        cp->fcoe_init_cid = 0x10;
1248        }
1249
1250        cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1251                                GFP_KERNEL);
1252        if (!cp->iscsi_tbl)
1253                goto error;
1254
1255        cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1256                                cp->max_cid_space, GFP_KERNEL);
1257        if (!cp->ctx_tbl)
1258                goto error;
1259
1260        for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1261                cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1262                cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1263        }
1264
1265        for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1266                cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1267
1268        pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1269                CNIC_PAGE_SIZE;
1270
1271        ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1272        if (ret)
1273                return -ENOMEM;
1274
1275        n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1276        for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1277                long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1278
1279                cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1280                cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1281                                                   off;
1282
1283                if ((i % n) == (n - 1))
1284                        j++;
1285        }
1286
1287        ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1288        if (ret)
1289                goto error;
1290
1291        if (CNIC_SUPPORTS_FCOE(bp)) {
1292                ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1293                if (ret)
1294                        goto error;
1295        }
1296
1297        pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1298        ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1299        if (ret)
1300                goto error;
1301
1302        ret = cnic_alloc_bnx2x_context(dev);
1303        if (ret)
1304                goto error;
1305
1306        if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
1307                return 0;
1308
1309        cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1310
1311        cp->l2_rx_ring_size = 15;
1312
1313        ret = cnic_alloc_uio_rings(dev, 4);
1314        if (ret)
1315                goto error;
1316
1317        ret = cnic_init_uio(dev);
1318        if (ret)
1319                goto error;
1320
1321        return 0;
1322
1323error:
1324        cnic_free_resc(dev);
1325        return -ENOMEM;
1326}
1327
1328static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1329{
1330        return cp->max_kwq_idx -
1331                ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1332}
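
/*
 * Free KWQ slots = max_kwq_idx - ((prod - con) & max_kwq_idx).  Worked
 * example, assuming max_kwq_idx is a power-of-two-minus-one mask such as
 * 255: with kwq_prod_idx = 10 and kwq_con_idx = 250, (10 - 250) & 255 = 16
 * entries are in flight, leaving 255 - 16 = 239 usable slots.
 */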
1333
1334static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1335                                  u32 num_wqes)
1336{
1337        struct cnic_local *cp = dev->cnic_priv;
1338        struct kwqe *prod_qe;
1339        u16 prod, sw_prod, i;
1340
1341        if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1342                return -EAGAIN;         /* bnx2 is down */
1343
1344        spin_lock_bh(&cp->cnic_ulp_lock);
1345        if (num_wqes > cnic_kwq_avail(cp) &&
1346            !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1347                spin_unlock_bh(&cp->cnic_ulp_lock);
1348                return -EAGAIN;
1349        }
1350
1351        clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1352
1353        prod = cp->kwq_prod_idx;
1354        sw_prod = prod & MAX_KWQ_IDX;
1355        for (i = 0; i < num_wqes; i++) {
1356                prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1357                memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1358                prod++;
1359                sw_prod = prod & MAX_KWQ_IDX;
1360        }
1361        cp->kwq_prod_idx = prod;
1362
1363        CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1364
1365        spin_unlock_bh(&cp->cnic_ulp_lock);
1366        return 0;
1367}
1368
1369static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1370                                   union l5cm_specific_data *l5_data)
1371{
1372        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1373        dma_addr_t map;
1374
1375        map = ctx->kwqe_data_mapping;
1376        l5_data->phy_address.lo = (u64) map & 0xffffffff;
1377        l5_data->phy_address.hi = (u64) map >> 32;
1378        return ctx->kwqe_data;
1379}
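
/*
 * Each offloaded connection owns a small buffer carved out of
 * kwq_16_data_info (set up in cnic_alloc_bnx2x_resc()); this helper
 * returns its kernel virtual address and fills l5_data with the matching
 * DMA address split into 32-bit lo/hi halves.
 */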
1380
1381static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1382                                u32 type, union l5cm_specific_data *l5_data)
1383{
1384        struct cnic_local *cp = dev->cnic_priv;
1385        struct bnx2x *bp = netdev_priv(dev->netdev);
1386        struct l5cm_spe kwqe;
1387        struct kwqe_16 *kwq[1];
1388        u16 type_16;
1389        int ret;
1390
1391        kwqe.hdr.conn_and_cmd_data =
1392                cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1393                             BNX2X_HW_CID(bp, cid)));
1394
1395        type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1396        type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1397                   SPE_HDR_FUNCTION_ID;
1398
1399        kwqe.hdr.type = cpu_to_le16(type_16);
1400        kwqe.hdr.reserved1 = 0;
1401        kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1402        kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1403
1404        kwq[0] = (struct kwqe_16 *) &kwqe;
1405
1406        spin_lock_bh(&cp->cnic_ulp_lock);
1407        ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1408        spin_unlock_bh(&cp->cnic_ulp_lock);
1409
1410        if (ret == 1)
1411                return 0;
1412
1413        return ret;
1414}
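
/*
 * The drv_submit_kwqes_16() callback reports how many WQEs it queued;
 * since exactly one is submitted here, a return of 1 means success and is
 * mapped to 0, anything else is passed back to the caller unchanged.
 */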
1415
1416static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1417                                   struct kcqe *cqes[], u32 num_cqes)
1418{
1419        struct cnic_local *cp = dev->cnic_priv;
1420        struct cnic_ulp_ops *ulp_ops;
1421
1422        rcu_read_lock();
1423        ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1424        if (likely(ulp_ops)) {
1425                ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1426                                          cqes, num_cqes);
1427        }
1428        rcu_read_unlock();
1429}
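
/*
 * KCQE completions are handed to the ULP under rcu_read_lock(); if the ULP
 * has already unregistered, the completions are silently dropped.
 */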
1430
1431static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
1432                                       int en_tcp_dack)
1433{
1434        struct bnx2x *bp = netdev_priv(dev->netdev);
1435        u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1436        u16 tstorm_flags = 0;
1437
1438        if (time_stamps) {
1439                xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1440                tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1441        }
1442        if (en_tcp_dack)
1443                tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
1444
1445        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1446                 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
1447
1448        CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1449                  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
1450}
1451
1452static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1453{
1454        struct cnic_local *cp = dev->cnic_priv;
1455        struct bnx2x *bp = netdev_priv(dev->netdev);
1456        struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1457        int hq_bds, pages;
1458        u32 pfid = bp->pfid;
1459
1460        cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1461        cp->num_ccells = req1->num_ccells_per_conn;
1462        cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1463                              cp->num_iscsi_tasks;
1464        cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1465                        BNX2X_ISCSI_R2TQE_SIZE;
1466        cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1467        pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1468        hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1469        cp->num_cqs = req1->num_cqs;
1470
1471        if (!dev->max_iscsi_conn)
1472                return 0;
1473
1474        /* init Tstorm RAM */
1475        CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1476                  req1->rq_num_wqes);
1477        CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1478                  CNIC_PAGE_SIZE);
1479        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1480                 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1481        CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1482                  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1483                  req1->num_tasks_per_conn);
1484
1485        /* init Ustorm RAM */
1486        CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1487                  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1488                  req1->rq_buffer_size);
1489        CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1490                  CNIC_PAGE_SIZE);
1491        CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1492                 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1493        CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1494                  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1495                  req1->num_tasks_per_conn);
1496        CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1497                  req1->rq_num_wqes);
1498        CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1499                  req1->cq_num_wqes);
1500        CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1501                  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1502
1503        /* init Xstorm RAM */
1504        CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1505                  CNIC_PAGE_SIZE);
1506        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1507                 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1508        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1509                  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1510                  req1->num_tasks_per_conn);
1511        CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1512                  hq_bds);
1513        CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1514                  req1->num_tasks_per_conn);
1515        CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1516                  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1517
1518        /* init Cstorm RAM */
1519        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1520                  CNIC_PAGE_SIZE);
1521        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1522                 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1523        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1524                  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1525                  req1->num_tasks_per_conn);
1526        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1527                  req1->cq_num_wqes);
1528        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1529                  hq_bds);
1530
1531        cnic_bnx2x_set_tcp_options(dev,
1532                        req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
1533                        req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
1534
1535        return 0;
1536}
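
/*
 * INIT1 derives the per-connection queue geometry (task array, R2T queue,
 * HQ) from the ULP's parameters and programs it, together with page size
 * and task counts, into the Tstorm/Ustorm/Xstorm/Cstorm RAM areas for this
 * PCI function (pfid).  Nothing is written when no iSCSI connections are
 * supported.
 */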
1537
1538static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1539{
1540        struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1541        struct bnx2x *bp = netdev_priv(dev->netdev);
1542        u32 pfid = bp->pfid;
1543        struct iscsi_kcqe kcqe;
1544        struct kcqe *cqes[1];
1545
1546        memset(&kcqe, 0, sizeof(kcqe));
1547        if (!dev->max_iscsi_conn) {
1548                kcqe.completion_status =
1549                        ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1550                goto done;
1551        }
1552
1553        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1554                TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1555        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1556                TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1557                req2->error_bit_map[1]);
1558
1559        CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1560                  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1561        CNIC_WR(dev, BAR_USTRORM_INTMEM +
1562                USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1563        CNIC_WR(dev, BAR_USTRORM_INTMEM +
1564                USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1565                req2->error_bit_map[1]);
1566
1567        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1568                  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1569
1570        kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1571
1572done:
1573        kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1574        cqes[0] = (struct kcqe *) &kcqe;
1575        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1576
1577        return 0;
1578}
1579
1580static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1581{
1582        struct cnic_local *cp = dev->cnic_priv;
1583        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1584
1585        if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1586                struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1587
1588                cnic_free_dma(dev, &iscsi->hq_info);
1589                cnic_free_dma(dev, &iscsi->r2tq_info);
1590                cnic_free_dma(dev, &iscsi->task_array_info);
1591                cnic_free_id(&cp->cid_tbl, ctx->cid);
1592        } else {
1593                cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1594        }
1595
1596        ctx->cid = 0;
1597}
1598
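/* Per-connection resource allocation (a descriptive note, inferred from the
 * code below): an FCoE connection only needs a CID from fcoe_cid_tbl, while
 * an iSCSI connection also gets three DMA rings (task array, R2T queue and
 * HQ), each sized from the cnic_local limits and rounded up to whole pages.
 * On any failure everything allocated so far is released through
 * cnic_free_bnx2x_conn_resc().
 */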
1599static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1600{
1601        u32 cid;
1602        int ret, pages;
1603        struct cnic_local *cp = dev->cnic_priv;
1604        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1605        struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1606
1607        if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1608                cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1609                if (cid == -1) {
1610                        ret = -ENOMEM;
1611                        goto error;
1612                }
1613                ctx->cid = cid;
1614                return 0;
1615        }
1616
1617        cid = cnic_alloc_new_id(&cp->cid_tbl);
1618        if (cid == -1) {
1619                ret = -ENOMEM;
1620                goto error;
1621        }
1622
1623        ctx->cid = cid;
1624        pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1625
1626        ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1627        if (ret)
1628                goto error;
1629
1630        pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1631        ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1632        if (ret)
1633                goto error;
1634
1635        pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1636        ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1637        if (ret)
1638                goto error;
1639
1640        return 0;
1641
1642error:
1643        cnic_free_bnx2x_conn_resc(dev, l5_cid);
1644        return ret;
1645}
1646
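/* Locate the context memory for a CID inside the pre-allocated context
 * blocks: the CID (relative to starting_cid) selects a block and an offset
 * of BNX2X_CONTEXT_MEM_SIZE within it, with the block base adjusted for
 * cp->ctx_align.  The context is zeroed when init is set, the DMA address
 * is returned split into a regpair, and the CPU pointer is returned.
 */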
1647static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1648                                struct regpair *ctx_addr)
1649{
1650        struct cnic_local *cp = dev->cnic_priv;
1651        struct cnic_eth_dev *ethdev = cp->ethdev;
1652        int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1653        int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1654        unsigned long align_off = 0;
1655        dma_addr_t ctx_map;
1656        void *ctx;
1657
1658        if (cp->ctx_align) {
1659                unsigned long mask = cp->ctx_align - 1;
1660
1661                if (cp->ctx_arr[blk].mapping & mask)
1662                        align_off = cp->ctx_align -
1663                                    (cp->ctx_arr[blk].mapping & mask);
1664        }
1665        ctx_map = cp->ctx_arr[blk].mapping + align_off +
1666                (off * BNX2X_CONTEXT_MEM_SIZE);
1667        ctx = cp->ctx_arr[blk].ctx + align_off +
1668              (off * BNX2X_CONTEXT_MEM_SIZE);
1669        if (init)
1670                memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1671
1672        ctx_addr->lo = ctx_map & 0xffffffff;
1673        ctx_addr->hi = (u64) ctx_map >> 32;
1674        return ctx;
1675}
1676
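/* Build the firmware iSCSI connection context from the offload KWQEs (two
 * fixed ones plus req2->num_additional_wqes more): the Xstorm, Tstorm,
 * Ustorm and Cstorm sections are filled with the SQ/CQ/RQ page tables
 * supplied by the ULP and with the HQ/R2TQ/task-array rings allocated by
 * cnic, and the CDU reserved fields are stamped with the HW CID.  Returns
 * -EINVAL if no additional WQEs were supplied and -ENOMEM if the context
 * memory cannot be located.
 */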
1677static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1678                                u32 num)
1679{
1680        struct cnic_local *cp = dev->cnic_priv;
1681        struct bnx2x *bp = netdev_priv(dev->netdev);
1682        struct iscsi_kwqe_conn_offload1 *req1 =
1683                        (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1684        struct iscsi_kwqe_conn_offload2 *req2 =
1685                        (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1686        struct iscsi_kwqe_conn_offload3 *req3;
1687        struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1688        struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1689        u32 cid = ctx->cid;
1690        u32 hw_cid = BNX2X_HW_CID(bp, cid);
1691        struct iscsi_context *ictx;
1692        struct regpair context_addr;
1693        int i, j, n = 2, n_max;
1694        u8 port = BP_PORT(bp);
1695
1696        ctx->ctx_flags = 0;
1697        if (!req2->num_additional_wqes)
1698                return -EINVAL;
1699
1700        n_max = req2->num_additional_wqes + 2;
1701
1702        ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1703        if (ictx == NULL)
1704                return -ENOMEM;
1705
1706        req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1707
1708        ictx->xstorm_ag_context.hq_prod = 1;
1709
1710        ictx->xstorm_st_context.iscsi.first_burst_length =
1711                ISCSI_DEF_FIRST_BURST_LEN;
1712        ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1713                ISCSI_DEF_MAX_RECV_SEG_LEN;
1714        ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1715                req1->sq_page_table_addr_lo;
1716        ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1717                req1->sq_page_table_addr_hi;
1718        ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1719        ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1720        ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1721                iscsi->hq_info.pgtbl_map & 0xffffffff;
1722        ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1723                (u64) iscsi->hq_info.pgtbl_map >> 32;
1724        ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1725                iscsi->hq_info.pgtbl[0];
1726        ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1727                iscsi->hq_info.pgtbl[1];
1728        ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1729                iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1730        ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1731                (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1732        ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1733                iscsi->r2tq_info.pgtbl[0];
1734        ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1735                iscsi->r2tq_info.pgtbl[1];
1736        ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1737                iscsi->task_array_info.pgtbl_map & 0xffffffff;
1738        ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1739                (u64) iscsi->task_array_info.pgtbl_map >> 32;
1740        ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1741                BNX2X_ISCSI_PBL_NOT_CACHED;
1742        ictx->xstorm_st_context.iscsi.flags.flags |=
1743                XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1744        ictx->xstorm_st_context.iscsi.flags.flags |=
1745                XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1746        ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1747                ETH_P_8021Q;
1748        if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
1749            bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
1750
1751                port = 0;
1752        }
1753        ictx->xstorm_st_context.common.flags =
1754                1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1755        ictx->xstorm_st_context.common.flags |=
1756                port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1757
1758        ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1759        /* TSTORM requires the base address of RQ DB & not PTE */
1760        ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1761                req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1762        ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1763                req2->rq_page_table_addr_hi;
1764        ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1765        ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1766        ictx->tstorm_st_context.tcp.flags2 |=
1767                TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1768        ictx->tstorm_st_context.tcp.ooo_support_mode =
1769                TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1770
1771        ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1772
1773        ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1774                req2->rq_page_table_addr_lo;
1775        ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1776                req2->rq_page_table_addr_hi;
1777        ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1778        ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1779        ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1780                iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1781        ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1782                (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1783        ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1784                iscsi->r2tq_info.pgtbl[0];
1785        ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1786                iscsi->r2tq_info.pgtbl[1];
1787        ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1788                req1->cq_page_table_addr_lo;
1789        ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1790                req1->cq_page_table_addr_hi;
1791        ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1792        ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1793        ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1794        ictx->ustorm_st_context.task_pbe_cache_index =
1795                BNX2X_ISCSI_PBL_NOT_CACHED;
1796        ictx->ustorm_st_context.task_pdu_cache_index =
1797                BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1798
1799        for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1800                if (j == 3) {
1801                        if (n >= n_max)
1802                                break;
1803                        req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1804                        j = 0;
1805                }
1806                ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1807                ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1808                        req3->qp_first_pte[j].hi;
1809                ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1810                        req3->qp_first_pte[j].lo;
1811        }
1812
1813        ictx->ustorm_st_context.task_pbl_base.lo =
1814                iscsi->task_array_info.pgtbl_map & 0xffffffff;
1815        ictx->ustorm_st_context.task_pbl_base.hi =
1816                (u64) iscsi->task_array_info.pgtbl_map >> 32;
1817        ictx->ustorm_st_context.tce_phy_addr.lo =
1818                iscsi->task_array_info.pgtbl[0];
1819        ictx->ustorm_st_context.tce_phy_addr.hi =
1820                iscsi->task_array_info.pgtbl[1];
1821        ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1822        ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1823        ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1824        ictx->ustorm_st_context.negotiated_rx_and_flags |=
1825                ISCSI_DEF_MAX_BURST_LEN;
1826        ictx->ustorm_st_context.negotiated_rx |=
1827                ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1828                USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1829
1830        ictx->cstorm_st_context.hq_pbl_base.lo =
1831                iscsi->hq_info.pgtbl_map & 0xffffffff;
1832        ictx->cstorm_st_context.hq_pbl_base.hi =
1833                (u64) iscsi->hq_info.pgtbl_map >> 32;
1834        ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1835        ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1836        ictx->cstorm_st_context.task_pbl_base.lo =
1837                iscsi->task_array_info.pgtbl_map & 0xffffffff;
1838        ictx->cstorm_st_context.task_pbl_base.hi =
1839                (u64) iscsi->task_array_info.pgtbl_map >> 32;
1840        /* CSTORM and USTORM initialization is different, CSTORM requires
1841         * CQ DB base & not PTE addr */
1842        ictx->cstorm_st_context.cq_db_base.lo =
1843                req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1844        ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1845        ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1846        ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1847        for (i = 0; i < cp->num_cqs; i++) {
1848                ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1849                        ISCSI_INITIAL_SN;
1850                ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1851                        ISCSI_INITIAL_SN;
1852        }
1853
1854        ictx->xstorm_ag_context.cdu_reserved =
1855                CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1856                                       ISCSI_CONNECTION_TYPE);
1857        ictx->ustorm_ag_context.cdu_usage =
1858                CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1859                                       ISCSI_CONNECTION_TYPE);
1860        return 0;
1861
1862}
1863
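/* Handle ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: validate that enough KWQEs were
 * queued (*work tells the caller how many were consumed), enforce the
 * max_iscsi_conn limit, allocate the per-connection resources and program
 * the context, then reply to the iSCSI ULP with an OFFLOAD_CONN KCQE whose
 * completion_status reports success, CID_BUSY or CTX_ALLOC_FAILURE.
 */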
1864static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1865                                   u32 num, int *work)
1866{
1867        struct iscsi_kwqe_conn_offload1 *req1;
1868        struct iscsi_kwqe_conn_offload2 *req2;
1869        struct cnic_local *cp = dev->cnic_priv;
1870        struct bnx2x *bp = netdev_priv(dev->netdev);
1871        struct cnic_context *ctx;
1872        struct iscsi_kcqe kcqe;
1873        struct kcqe *cqes[1];
1874        u32 l5_cid;
1875        int ret = 0;
1876
1877        if (num < 2) {
1878                *work = num;
1879                return -EINVAL;
1880        }
1881
1882        req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1883        req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1884        if ((num - 2) < req2->num_additional_wqes) {
1885                *work = num;
1886                return -EINVAL;
1887        }
1888        *work = 2 + req2->num_additional_wqes;
1889
1890        l5_cid = req1->iscsi_conn_id;
1891        if (l5_cid >= MAX_ISCSI_TBL_SZ)
1892                return -EINVAL;
1893
1894        memset(&kcqe, 0, sizeof(kcqe));
1895        kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1896        kcqe.iscsi_conn_id = l5_cid;
1897        kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1898
1899        ctx = &cp->ctx_tbl[l5_cid];
1900        if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1901                kcqe.completion_status =
1902                        ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1903                goto done;
1904        }
1905
1906        if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1907                atomic_dec(&cp->iscsi_conn);
1908                goto done;
1909        }
1910        ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1911        if (ret) {
1912                atomic_dec(&cp->iscsi_conn);
1913                ret = 0;
1914                goto done;
1915        }
1916        ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1917        if (ret < 0) {
1918                cnic_free_bnx2x_conn_resc(dev, l5_cid);
1919                atomic_dec(&cp->iscsi_conn);
1920                goto done;
1921        }
1922
1923        kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1924        kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
1925
1926done:
1927        cqes[0] = (struct kcqe *) &kcqe;
1928        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1929        return 0;
1930}
1931
1932
1933static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1934{
1935        struct cnic_local *cp = dev->cnic_priv;
1936        struct iscsi_kwqe_conn_update *req =
1937                (struct iscsi_kwqe_conn_update *) kwqe;
1938        void *data;
1939        union l5cm_specific_data l5_data;
1940        u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1941        int ret;
1942
1943        if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1944                return -EINVAL;
1945
1946        data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1947        if (!data)
1948                return -ENOMEM;
1949
1950        memcpy(data, kwqe, sizeof(struct kwqe));
1951
1952        ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1953                        req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1954        return ret;
1955}
1956
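/* Submit a COMMON_CFC_DEL ramrod for the connection's HW CID and wait up
 * to CNIC_RAMROD_TMO for the completion to set ctx->wait_cond.  -EBUSY is
 * returned if CTX_FL_CID_ERROR remains set on the context.
 */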
1957static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1958{
1959        struct cnic_local *cp = dev->cnic_priv;
1960        struct bnx2x *bp = netdev_priv(dev->netdev);
1961        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1962        union l5cm_specific_data l5_data;
1963        int ret;
1964        u32 hw_cid;
1965
1966        init_waitqueue_head(&ctx->waitq);
1967        ctx->wait_cond = 0;
1968        memset(&l5_data, 0, sizeof(l5_data));
1969        hw_cid = BNX2X_HW_CID(bp, ctx->cid);
1970
1971        ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1972                                  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1973
1974        if (ret == 0) {
1975                wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1976                if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1977                        return -EBUSY;
1978        }
1979
1980        return 0;
1981}
1982
1983static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1984{
1985        struct cnic_local *cp = dev->cnic_priv;
1986        struct iscsi_kwqe_conn_destroy *req =
1987                (struct iscsi_kwqe_conn_destroy *) kwqe;
1988        u32 l5_cid = req->reserved0;
1989        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1990        int ret = 0;
1991        struct iscsi_kcqe kcqe;
1992        struct kcqe *cqes[1];
1993
1994        if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1995                goto skip_cfc_delete;
1996
1997        if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1998                unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1999
2000                if (delta > (2 * HZ))
2001                        delta = 0;
2002
2003                set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2004                queue_delayed_work(cnic_wq, &cp->delete_task, delta);
2005                goto destroy_reply;
2006        }
2007
2008        ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
2009
2010skip_cfc_delete:
2011        cnic_free_bnx2x_conn_resc(dev, l5_cid);
2012
2013        if (!ret) {
2014                atomic_dec(&cp->iscsi_conn);
2015                clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2016        }
2017
2018destroy_reply:
2019        memset(&kcqe, 0, sizeof(kcqe));
2020        kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
2021        kcqe.iscsi_conn_id = l5_cid;
2022        kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
2023        kcqe.iscsi_conn_context_id = req->context_id;
2024
2025        cqes[0] = (struct kcqe *) &kcqe;
2026        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
2027
2028        return 0;
2029}
2030
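/* Fill the Xstorm and Tstorm portions of the active-connection buffer for
 * a TCP connect: context address, MSS and receive buffer, optional Nagle,
 * a pseudo-header checksum computed over the addresses in IPv6 form, and
 * the keepalive parameters from the third connect KWQE.
 */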
2031static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2032                                      struct l4_kwq_connect_req1 *kwqe1,
2033                                      struct l4_kwq_connect_req3 *kwqe3,
2034                                      struct l5cm_active_conn_buffer *conn_buf)
2035{
2036        struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
2037        struct l5cm_xstorm_conn_buffer *xstorm_buf =
2038                &conn_buf->xstorm_conn_buffer;
2039        struct l5cm_tstorm_conn_buffer *tstorm_buf =
2040                &conn_buf->tstorm_conn_buffer;
2041        struct regpair context_addr;
2042        u32 cid = BNX2X_SW_CID(kwqe1->cid);
2043        struct in6_addr src_ip, dst_ip;
2044        int i;
2045        u32 *addrp;
2046
2047        addrp = (u32 *) &conn_addr->local_ip_addr;
2048        for (i = 0; i < 4; i++, addrp++)
2049                src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2050
2051        addrp = (u32 *) &conn_addr->remote_ip_addr;
2052        for (i = 0; i < 4; i++, addrp++)
2053                dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
2054
2055        cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
2056
2057        xstorm_buf->context_addr.hi = context_addr.hi;
2058        xstorm_buf->context_addr.lo = context_addr.lo;
2059        xstorm_buf->mss = 0xffff;
2060        xstorm_buf->rcv_buf = kwqe3->rcv_buf;
2061        if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
2062                xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
2063        xstorm_buf->pseudo_header_checksum =
2064                swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
2065
2066        if (kwqe3->ka_timeout) {
2067                tstorm_buf->ka_enable = 1;
2068                tstorm_buf->ka_timeout = kwqe3->ka_timeout;
2069                tstorm_buf->ka_interval = kwqe3->ka_interval;
2070                tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
2071        }
2072        tstorm_buf->max_rt_time = 0xffffffff;
2073}
2074
2075static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2076{
2077        struct bnx2x *bp = netdev_priv(dev->netdev);
2078        u32 pfid = bp->pfid;
2079        u8 *mac = dev->mac_addr;
2080
2081        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2082                 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2083        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2084                 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2085        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2086                 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2087        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2088                 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2089        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2090                 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2091        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2092                 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2093
2094        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2095                 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2096        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2097                 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2098                 mac[4]);
2099        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2100                 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2101        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2102                 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2103                 mac[2]);
2104        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2105                 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2106        CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2107                 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2108                 mac[0]);
2109}
2110
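/* Handle an L4 CONNECT1 KWQE: the request spans two KWQEs for IPv4 or
 * three for IPv6 (*work reports how many were consumed).  The remote MAC,
 * the IP addresses, ports and PMTU are copied into a kwqe-16 data buffer,
 * the storm connection buffers are initialized, the VLAN id is written to
 * the Xstorm iSCSI area, and a TCP_CONNECT ramrod is submitted on the
 * connection's CID.
 */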
2111static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2112                              u32 num, int *work)
2113{
2114        struct cnic_local *cp = dev->cnic_priv;
2115        struct bnx2x *bp = netdev_priv(dev->netdev);
2116        struct l4_kwq_connect_req1 *kwqe1 =
2117                (struct l4_kwq_connect_req1 *) wqes[0];
2118        struct l4_kwq_connect_req3 *kwqe3;
2119        struct l5cm_active_conn_buffer *conn_buf;
2120        struct l5cm_conn_addr_params *conn_addr;
2121        union l5cm_specific_data l5_data;
2122        u32 l5_cid = kwqe1->pg_cid;
2123        struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2124        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2125        int ret;
2126
2127        if (num < 2) {
2128                *work = num;
2129                return -EINVAL;
2130        }
2131
2132        if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2133                *work = 3;
2134        else
2135                *work = 2;
2136
2137        if (num < *work) {
2138                *work = num;
2139                return -EINVAL;
2140        }
2141
2142        if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2143                netdev_err(dev->netdev, "conn_buf size too big\n");
2144                return -ENOMEM;
2145        }
2146        conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2147        if (!conn_buf)
2148                return -ENOMEM;
2149
2150        memset(conn_buf, 0, sizeof(*conn_buf));
2151
2152        conn_addr = &conn_buf->conn_addr_buf;
2153        conn_addr->remote_addr_0 = csk->ha[0];
2154        conn_addr->remote_addr_1 = csk->ha[1];
2155        conn_addr->remote_addr_2 = csk->ha[2];
2156        conn_addr->remote_addr_3 = csk->ha[3];
2157        conn_addr->remote_addr_4 = csk->ha[4];
2158        conn_addr->remote_addr_5 = csk->ha[5];
2159
2160        if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2161                struct l4_kwq_connect_req2 *kwqe2 =
2162                        (struct l4_kwq_connect_req2 *) wqes[1];
2163
2164                conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2165                conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2166                conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2167
2168                conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2169                conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2170                conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2171                conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2172        }
2173        kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2174
2175        conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2176        conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2177        conn_addr->local_tcp_port = kwqe1->src_port;
2178        conn_addr->remote_tcp_port = kwqe1->dst_port;
2179
2180        conn_addr->pmtu = kwqe3->pmtu;
2181        cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2182
2183        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2184                  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
2185
2186        ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2187                        kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2188        if (!ret)
2189                set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2190
2191        return ret;
2192}
2193
2194static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2195{
2196        struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2197        union l5cm_specific_data l5_data;
2198        int ret;
2199
2200        memset(&l5_data, 0, sizeof(l5_data));
2201        ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2202                        req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2203        return ret;
2204}
2205
2206static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2207{
2208        struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2209        union l5cm_specific_data l5_data;
2210        int ret;
2211
2212        memset(&l5_data, 0, sizeof(l5_data));
2213        ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2214                        req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2215        return ret;
2216}
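
/* The L4 PG (path/gateway) offload and update requests below are completed
 * entirely in the driver: a matching KCQE is synthesized and returned to
 * the L4 ULP, and no ramrod is sent to the chip.
 */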
2217static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2218{
2219        struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2220        struct l4_kcq kcqe;
2221        struct kcqe *cqes[1];
2222
2223        memset(&kcqe, 0, sizeof(kcqe));
2224        kcqe.pg_host_opaque = req->host_opaque;
2225        kcqe.pg_cid = req->host_opaque;
2226        kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2227        cqes[0] = (struct kcqe *) &kcqe;
2228        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2229        return 0;
2230}
2231
2232static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2233{
2234        struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2235        struct l4_kcq kcqe;
2236        struct kcqe *cqes[1];
2237
2238        memset(&kcqe, 0, sizeof(kcqe));
2239        kcqe.pg_host_opaque = req->pg_host_opaque;
2240        kcqe.pg_cid = req->pg_cid;
2241        kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2242        cqes[0] = (struct kcqe *) &kcqe;
2243        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2244        return 0;
2245}
2246
2247static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2248{
2249        struct fcoe_kwqe_stat *req;
2250        struct fcoe_stat_ramrod_params *fcoe_stat;
2251        union l5cm_specific_data l5_data;
2252        struct cnic_local *cp = dev->cnic_priv;
2253        struct bnx2x *bp = netdev_priv(dev->netdev);
2254        int ret;
2255        u32 cid;
2256
2257        req = (struct fcoe_kwqe_stat *) kwqe;
2258        cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2259
2260        fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2261        if (!fcoe_stat)
2262                return -ENOMEM;
2263
2264        memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2265        memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2266
2267        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2268                                  FCOE_CONNECTION_TYPE, &l5_data);
2269        return ret;
2270}
2271
2272static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2273                                 u32 num, int *work)
2274{
2275        int ret;
2276        struct cnic_local *cp = dev->cnic_priv;
2277        struct bnx2x *bp = netdev_priv(dev->netdev);
2278        u32 cid;
2279        struct fcoe_init_ramrod_params *fcoe_init;
2280        struct fcoe_kwqe_init1 *req1;
2281        struct fcoe_kwqe_init2 *req2;
2282        struct fcoe_kwqe_init3 *req3;
2283        union l5cm_specific_data l5_data;
2284
2285        if (num < 3) {
2286                *work = num;
2287                return -EINVAL;
2288        }
2289        req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2290        req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2291        req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2292        if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2293                *work = 1;
2294                return -EINVAL;
2295        }
2296        if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2297                *work = 2;
2298                return -EINVAL;
2299        }
2300
2301        if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2302                netdev_err(dev->netdev, "fcoe_init size too big\n");
2303                return -ENOMEM;
2304        }
2305        fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2306        if (!fcoe_init)
2307                return -ENOMEM;
2308
2309        memset(fcoe_init, 0, sizeof(*fcoe_init));
2310        memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2311        memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2312        memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2313        fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2314        fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2315        fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2316
2317        fcoe_init->sb_num = cp->status_blk_num;
2318        fcoe_init->eq_prod = MAX_KCQ_IDX;
2319        fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2320        cp->kcq2.sw_prod_idx = 0;
2321
2322        cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2323        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2324                                  FCOE_CONNECTION_TYPE, &l5_data);
2325        *work = 3;
2326        return ret;
2327}
2328
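/* FCoE connection offload takes exactly four KWQEs.  The L5 CID is offset
 * by BNX2X_FCOE_L5_CID_BASE into the shared context table; on success the
 * four requests are copied into a kwqe-16 buffer and an OFFLOAD_CONN
 * ramrod is submitted, otherwise an error KCQE (CTX_ALLOC_FAILURE) is
 * returned to the FCoE ULP.
 */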
2329static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2330                                 u32 num, int *work)
2331{
2332        int ret = 0;
2333        u32 cid = -1, l5_cid;
2334        struct cnic_local *cp = dev->cnic_priv;
2335        struct bnx2x *bp = netdev_priv(dev->netdev);
2336        struct fcoe_kwqe_conn_offload1 *req1;
2337        struct fcoe_kwqe_conn_offload2 *req2;
2338        struct fcoe_kwqe_conn_offload3 *req3;
2339        struct fcoe_kwqe_conn_offload4 *req4;
2340        struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2341        struct cnic_context *ctx;
2342        struct fcoe_context *fctx;
2343        struct regpair ctx_addr;
2344        union l5cm_specific_data l5_data;
2345        struct fcoe_kcqe kcqe;
2346        struct kcqe *cqes[1];
2347
2348        if (num < 4) {
2349                *work = num;
2350                return -EINVAL;
2351        }
2352        req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2353        req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2354        req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2355        req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2356
2357        *work = 4;
2358
2359        l5_cid = req1->fcoe_conn_id;
2360        if (l5_cid >= dev->max_fcoe_conn)
2361                goto err_reply;
2362
2363        l5_cid += BNX2X_FCOE_L5_CID_BASE;
2364
2365        ctx = &cp->ctx_tbl[l5_cid];
2366        if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2367                goto err_reply;
2368
2369        ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2370        if (ret) {
2371                ret = 0;
2372                goto err_reply;
2373        }
2374        cid = ctx->cid;
2375
2376        fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2377        if (fctx) {
2378                u32 hw_cid = BNX2X_HW_CID(bp, cid);
2379                u32 val;
2380
2381                val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2382                                             FCOE_CONNECTION_TYPE);
2383                fctx->xstorm_ag_context.cdu_reserved = val;
2384                val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2385                                             FCOE_CONNECTION_TYPE);
2386                fctx->ustorm_ag_context.cdu_usage = val;
2387        }
2388        if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2389                netdev_err(dev->netdev, "fcoe_offload size too big\n");
2390                goto err_reply;
2391        }
2392        fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2393        if (!fcoe_offload)
2394                goto err_reply;
2395
2396        memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2397        memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2398        memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2399        memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2400        memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2401
2402        cid = BNX2X_HW_CID(bp, cid);
2403        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2404                                  FCOE_CONNECTION_TYPE, &l5_data);
2405        if (!ret)
2406                set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2407
2408        return ret;
2409
2410err_reply:
2411        if (cid != -1)
2412                cnic_free_bnx2x_conn_resc(dev, l5_cid);
2413
2414        memset(&kcqe, 0, sizeof(kcqe));
2415        kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2416        kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2417        kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2418
2419        cqes[0] = (struct kcqe *) &kcqe;
2420        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2421        return ret;
2422}
2423
2424static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2425{
2426        struct fcoe_kwqe_conn_enable_disable *req;
2427        struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2428        union l5cm_specific_data l5_data;
2429        int ret;
2430        u32 cid, l5_cid;
2431        struct cnic_local *cp = dev->cnic_priv;
2432
2433        req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2434        cid = req->context_id;
2435        l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2436
2437        if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2438                netdev_err(dev->netdev, "fcoe_enable size too big\n");
2439                return -ENOMEM;
2440        }
2441        fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2442        if (!fcoe_enable)
2443                return -ENOMEM;
2444
2445        memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2446        memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2447        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2448                                  FCOE_CONNECTION_TYPE, &l5_data);
2449        return ret;
2450}
2451
2452static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2453{
2454        struct fcoe_kwqe_conn_enable_disable *req;
2455        struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2456        union l5cm_specific_data l5_data;
2457        int ret;
2458        u32 cid, l5_cid;
2459        struct cnic_local *cp = dev->cnic_priv;
2460
2461        req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2462        cid = req->context_id;
2463        l5_cid = req->conn_id;
2464        if (l5_cid >= dev->max_fcoe_conn)
2465                return -EINVAL;
2466
2467        l5_cid += BNX2X_FCOE_L5_CID_BASE;
2468
2469        if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2470                netdev_err(dev->netdev, "fcoe_disable size too big\n");
2471                return -ENOMEM;
2472        }
2473        fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2474        if (!fcoe_disable)
2475                return -ENOMEM;
2476
2477        memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2478        memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2479        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2480                                  FCOE_CONNECTION_TYPE, &l5_data);
2481        return ret;
2482}
2483
2484static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2485{
2486        struct fcoe_kwqe_conn_destroy *req;
2487        union l5cm_specific_data l5_data;
2488        int ret;
2489        u32 cid, l5_cid;
2490        struct cnic_local *cp = dev->cnic_priv;
2491        struct cnic_context *ctx;
2492        struct fcoe_kcqe kcqe;
2493        struct kcqe *cqes[1];
2494
2495        req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2496        cid = req->context_id;
2497        l5_cid = req->conn_id;
2498        if (l5_cid >= dev->max_fcoe_conn)
2499                return -EINVAL;
2500
2501        l5_cid += BNX2X_FCOE_L5_CID_BASE;
2502
2503        ctx = &cp->ctx_tbl[l5_cid];
2504
2505        init_waitqueue_head(&ctx->waitq);
2506        ctx->wait_cond = 0;
2507
2508        memset(&kcqe, 0, sizeof(kcqe));
2509        kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2510        memset(&l5_data, 0, sizeof(l5_data));
2511        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2512                                  FCOE_CONNECTION_TYPE, &l5_data);
2513        if (ret == 0) {
2514                wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2515                if (ctx->wait_cond)
2516                        kcqe.completion_status = 0;
2517        }
2518
2519        set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2520        queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2521
2522        kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2523        kcqe.fcoe_conn_id = req->conn_id;
2524        kcqe.fcoe_conn_context_id = cid;
2525
2526        cqes[0] = (struct kcqe *) &kcqe;
2527        cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2528        return ret;
2529}
2530
2531static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2532{
2533        struct cnic_local *cp = dev->cnic_priv;
2534        u32 i;
2535
2536        for (i = start_cid; i < cp->max_cid_space; i++) {
2537                struct cnic_context *ctx = &cp->ctx_tbl[i];
2538                int j;
2539
2540                while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2541                        msleep(10);
2542
2543                for (j = 0; j < 5; j++) {
2544                        if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2545                                break;
2546                        msleep(20);
2547                }
2548
2549                if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2550                        netdev_warn(dev->netdev, "CID %x not deleted\n",
2551                                   ctx->cid);
2552        }
2553}
2554
2555static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2556{
2557        struct fcoe_kwqe_destroy *req;
2558        union l5cm_specific_data l5_data;
2559        struct cnic_local *cp = dev->cnic_priv;
2560        struct bnx2x *bp = netdev_priv(dev->netdev);
2561        int ret;
2562        u32 cid;
2563
2564        cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2565
2566        req = (struct fcoe_kwqe_destroy *) kwqe;
2567        cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2568
2569        memset(&l5_data, 0, sizeof(l5_data));
2570        ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2571                                  FCOE_CONNECTION_TYPE, &l5_data);
2572        return ret;
2573}
2574
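/* When a KWQE fails with -EIO or -EAGAIN (typically the parity error
 * path), synthesize the corresponding error KCQE for the FCoE, iSCSI or
 * L4 layer so the ULP driver sees a completion with a PARITY_ERROR status
 * and can clean up instead of waiting for a reply that will never arrive.
 */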
2575static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2576{
2577        struct cnic_local *cp = dev->cnic_priv;
2578        struct kcqe kcqe;
2579        struct kcqe *cqes[1];
2580        u32 cid;
2581        u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2582        u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2583        u32 kcqe_op;
2584        int ulp_type;
2585
2586        cid = kwqe->kwqe_info0;
2587        memset(&kcqe, 0, sizeof(kcqe));
2588
2589        if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2590                u32 l5_cid = 0;
2591
2592                ulp_type = CNIC_ULP_FCOE;
2593                if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2594                        struct fcoe_kwqe_conn_enable_disable *req;
2595
2596                        req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2597                        kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2598                        cid = req->context_id;
2599                        l5_cid = req->conn_id;
2600                } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2601                        kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2602                } else {
2603                        return;
2604                }
2605                kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2606                kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2607                kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2608                kcqe.kcqe_info2 = cid;
2609                kcqe.kcqe_info0 = l5_cid;
2610
2611        } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2612                ulp_type = CNIC_ULP_ISCSI;
2613                if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2614                        cid = kwqe->kwqe_info1;
2615
2616                kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2617                kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2618                kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2619                kcqe.kcqe_info2 = cid;
2620                cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2621
2622        } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2623                struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2624
2625                ulp_type = CNIC_ULP_L4;
2626                if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2627                        kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2628                else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2629                        kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2630                else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2631                        kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2632                else
2633                        return;
2634
2635                kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2636                                    KCQE_FLAGS_LAYER_MASK_L4;
2637                l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2638                l4kcqe->cid = cid;
2639                cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2640        } else {
2641                return;
2642        }
2643
2644        cqes[0] = &kcqe;
2645        cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2646}
2647
2648static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2649                                         struct kwqe *wqes[], u32 num_wqes)
2650{
2651        int i, work, ret;
2652        u32 opcode;
2653        struct kwqe *kwqe;
2654
2655        if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2656                return -EAGAIN;         /* bnx2x is down */
2657
2658        for (i = 0; i < num_wqes; ) {
2659                kwqe = wqes[i];
2660                opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2661                work = 1;
2662
2663                switch (opcode) {
2664                case ISCSI_KWQE_OPCODE_INIT1:
2665                        ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2666                        break;
2667                case ISCSI_KWQE_OPCODE_INIT2:
2668                        ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2669                        break;
2670                case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2671                        ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2672                                                     num_wqes - i, &work);
2673                        break;
2674                case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2675                        ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2676                        break;
2677                case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2678                        ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2679                        break;
2680                case L4_KWQE_OPCODE_VALUE_CONNECT1:
2681                        ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2682                                                 &work);
2683                        break;
2684                case L4_KWQE_OPCODE_VALUE_CLOSE:
2685                        ret = cnic_bnx2x_close(dev, kwqe);
2686                        break;
2687                case L4_KWQE_OPCODE_VALUE_RESET:
2688                        ret = cnic_bnx2x_reset(dev, kwqe);
2689                        break;
2690                case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2691                        ret = cnic_bnx2x_offload_pg(dev, kwqe);
2692                        break;
2693                case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2694                        ret = cnic_bnx2x_update_pg(dev, kwqe);
2695                        break;
2696                case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2697                        ret = 0;
2698                        break;
2699                default:
2700                        ret = 0;
2701                        netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2702                                   opcode);
2703                        break;
2704                }
2705                if (ret < 0) {
2706                        netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2707                                   opcode);
2708
2709                        /* Possibly bnx2x parity error, send completion
2710                         * to ulp drivers with error code to speed up
2711                         * cleanup and reset recovery.
2712                         */
2713                        if (ret == -EIO || ret == -EAGAIN)
2714                                cnic_bnx2x_kwqe_err(dev, kwqe);
2715                }
2716                i += work;
2717        }
2718        return 0;
2719}
2720
2721static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2722                                        struct kwqe *wqes[], u32 num_wqes)
2723{
2724        struct bnx2x *bp = netdev_priv(dev->netdev);
2725        int i, work, ret;
2726        u32 opcode;
2727        struct kwqe *kwqe;
2728
2729        if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2730                return -EAGAIN;         /* bnx2x is down */
2731
2732        if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2733                return -EINVAL;
2734
2735        for (i = 0; i < num_wqes; ) {
2736                kwqe = wqes[i];
2737                opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2738                work = 1;
2739
2740                switch (opcode) {
2741                case FCOE_KWQE_OPCODE_INIT1:
2742                        ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2743                                                    num_wqes - i, &work);
2744                        break;
2745                case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2746                        ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2747                                                    num_wqes - i, &work);
2748                        break;
2749                case FCOE_KWQE_OPCODE_ENABLE_CONN:
2750                        ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2751                        break;
2752                case FCOE_KWQE_OPCODE_DISABLE_CONN:
2753                        ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2754                        break;
2755                case FCOE_KWQE_OPCODE_DESTROY_CONN:
2756                        ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2757                        break;
2758                case FCOE_KWQE_OPCODE_DESTROY:
2759                        ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2760                        break;
2761                case FCOE_KWQE_OPCODE_STAT:
2762                        ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2763                        break;
2764                default:
2765                        ret = 0;
2766                        netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2767                                   opcode);
2768                        break;
2769                }
2770                if (ret < 0) {
2771                        netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2772                                   opcode);
2773
2774                        /* Possibly bnx2x parity error, send completion
2775                         * to ulp drivers with error code to speed up
2776                         * cleanup and reset recovery.
2777                         */
2778                        if (ret == -EIO || ret == -EAGAIN)
2779                                cnic_bnx2x_kwqe_err(dev, kwqe);
2780                }
2781                i += work;
2782        }
2783        return 0;
2784}
2785
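/* Entry point for KWQE submission on bnx2x devices: the layer bits of the
 * first KWQE select the handler, with iSCSI, L4 and L2 requests going
 * through the iSCSI submit path and FCoE requests through the FCoE path.
 */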
2786static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2787                                   u32 num_wqes)
2788{
2789        int ret = -EINVAL;
2790        u32 layer_code;
2791
2792        if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2793                return -EAGAIN;         /* bnx2x is down */
2794
2795        if (!num_wqes)
2796                return 0;
2797
2798        layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2799        switch (layer_code) {
2800        case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2801        case KWQE_FLAGS_LAYER_MASK_L4:
2802        case KWQE_FLAGS_LAYER_MASK_L2:
2803                ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2804                break;
2805
2806        case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2807                ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2808                break;
2809        }
2810        return ret;
2811}
2812
2813static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2814{
2815        if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2816                return KCQE_FLAGS_LAYER_MASK_L4;
2817
2818        return opflag & KCQE_FLAGS_LAYER_MASK;
2819}
2820
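/* Deliver completed KCQEs to the bound ULPs.  Consecutive entries that
 * belong to the same layer are batched into a single indicate_kcqes()
 * call, L2 completions are skipped here, and any ramrod completions seen
 * are returned to the SPQ credit pool afterwards.
 */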
2821static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2822{
2823        struct cnic_local *cp = dev->cnic_priv;
2824        int i, j, comp = 0;
2825
2826        i = 0;
2827        j = 1;
2828        while (num_cqes) {
2829                struct cnic_ulp_ops *ulp_ops;
2830                int ulp_type;
2831                u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2832                u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2833
2834                if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2835                        comp++;
2836
2837                while (j < num_cqes) {
2838                        u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2839
2840                        if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2841                                break;
2842
2843                        if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2844                                comp++;
2845                        j++;
2846                }
2847
2848                if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2849                        ulp_type = CNIC_ULP_RDMA;
2850                else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2851                        ulp_type = CNIC_ULP_ISCSI;
2852                else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2853                        ulp_type = CNIC_ULP_FCOE;
2854                else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2855                        ulp_type = CNIC_ULP_L4;
2856                else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2857                        goto end;
2858                else {
2859                        netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2860                                   kcqe_op_flag);
2861                        goto end;
2862                }
2863
2864                rcu_read_lock();
2865                ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2866                if (likely(ulp_ops)) {
2867                        ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2868                                                  cp->completed_kcq + i, j);
2869                }
2870                rcu_read_unlock();
2871end:
2872                num_cqes -= j;
2873                i += j;
2874                j = 1;
2875        }
2876        if (unlikely(comp))
2877                cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2878}
2879
2880static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2881{
2882        struct cnic_local *cp = dev->cnic_priv;
2883        u16 i, ri, hw_prod, last;
2884        struct kcqe *kcqe;
2885        int kcqe_cnt = 0, last_cnt = 0;
2886
2887        i = ri = last = info->sw_prod_idx;
2888        ri &= MAX_KCQ_IDX;
2889        hw_prod = *info->hw_prod_idx_ptr;
2890        hw_prod = info->hw_idx(hw_prod);
2891
2892        while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2893                kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2894                cp->completed_kcq[kcqe_cnt++] = kcqe;
2895                i = info->next_idx(i);
2896                ri = i & MAX_KCQ_IDX;
2897                if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2898                        last_cnt = kcqe_cnt;
2899                        last = i;
2900                }
2901        }
2902
2903        info->sw_prod_idx = last;
2904        return last_cnt;
2905}
2906
2907static int cnic_l2_completion(struct cnic_local *cp)
2908{
2909        u16 hw_cons, sw_cons;
2910        struct cnic_uio_dev *udev = cp->udev;
2911        union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2912                                        (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2913        u32 cmd;
2914        int comp = 0;
2915
2916        if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2917                return 0;
2918
2919        hw_cons = *cp->rx_cons_ptr;
2920        if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2921                hw_cons++;
2922
2923        sw_cons = cp->rx_cons;
2924        while (sw_cons != hw_cons) {
2925                u8 cqe_fp_flags;
2926
2927                cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2928                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2929                if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2930                        cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2931                        cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2932                        if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2933                            cmd == RAMROD_CMD_ID_ETH_HALT)
2934                                comp++;
2935                }
2936                sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2937        }
2938        return comp;
2939}
2940
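    /* Check the cached L2 tx/rx consumer indices against the status block
     * and notify the UIO device when they have moved.  CNIC_LCL_FL_L2_WAIT
     * is cleared once a setup/halt ramrod completion has been seen.
     */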
2941static void cnic_chk_pkt_rings(struct cnic_local *cp)
2942{
2943        u16 rx_cons, tx_cons;
2944        int comp = 0;
2945
2946        if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2947                return;
2948
2949        rx_cons = *cp->rx_cons_ptr;
2950        tx_cons = *cp->tx_cons_ptr;
2951        if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2952                if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2953                        comp = cnic_l2_completion(cp);
2954
2955                cp->tx_cons = tx_cons;
2956                cp->rx_cons = rx_cons;
2957
2958                if (cp->udev)
2959                        uio_event_notify(&cp->udev->cnic_uinfo);
2960        }
2961        if (comp)
2962                clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2963}
2964
2965static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2966{
2967        struct cnic_local *cp = dev->cnic_priv;
2968        u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2969        int kcqe_cnt;
2970
2971        /* status block index must be read before reading other fields */
2972        rmb();
2973        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2974
2975        while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2976
2977                service_kcqes(dev, kcqe_cnt);
2978
2979                /* Tell compiler that status_blk fields can change. */
2980                barrier();
2981                status_idx = (u16) *cp->kcq1.status_idx_ptr;
2982                /* status block index must be read first */
2983                rmb();
2984                cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2985        }
2986
2987        CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2988
2989        cnic_chk_pkt_rings(cp);
2990
2991        return status_idx;
2992}
2993
2994static int cnic_service_bnx2(void *data, void *status_blk)
2995{
2996        struct cnic_dev *dev = data;
2997
2998        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2999                struct status_block *sblk = status_blk;
3000
3001                return sblk->status_idx;
3002        }
3003
3004        return cnic_service_bnx2_queues(dev);
3005}
3006
3007static void cnic_service_bnx2_msix(unsigned long data)
3008{
3009        struct cnic_dev *dev = (struct cnic_dev *) data;
3010        struct cnic_local *cp = dev->cnic_priv;
3011
3012        cp->last_status_idx = cnic_service_bnx2_queues(dev);
3013
3014        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3015                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3016}
3017
3018static void cnic_doirq(struct cnic_dev *dev)
3019{
3020        struct cnic_local *cp = dev->cnic_priv;
3021
3022        if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3023                u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3024
3025                prefetch(cp->status_blk.gen);
3026                prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3027
3028                tasklet_schedule(&cp->cnic_irq_task);
3029        }
3030}
3031
3032static irqreturn_t cnic_irq(int irq, void *dev_instance)
3033{
3034        struct cnic_dev *dev = dev_instance;
3035        struct cnic_local *cp = dev->cnic_priv;
3036
3037        if (cp->ack_int)
3038                cp->ack_int(dev);
3039
3040        cnic_doirq(dev);
3041
3042        return IRQ_HANDLED;
3043}
3044
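    /* Interrupt acknowledgement helpers: cnic_ack_bnx2x_int() goes through
     * the HC command register, while cnic_ack_igu_sb() writes the IGU
     * directly and is used by the *_e2_* variants below.
     */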
3045static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3046                                      u16 index, u8 op, u8 update)
3047{
3048        struct bnx2x *bp = netdev_priv(dev->netdev);
3049        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3050                       COMMAND_REG_INT_ACK);
3051        struct igu_ack_register igu_ack;
3052
3053        igu_ack.status_block_index = index;
3054        igu_ack.sb_id_and_flags =
3055                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3056                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3057                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3058                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3059
3060        CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3061}
3062
3063static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3064                            u16 index, u8 op, u8 update)
3065{
3066        struct igu_regular cmd_data;
3067        u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3068
3069        cmd_data.sb_id_and_flags =
3070                (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3071                (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3072                (update << IGU_REGULAR_BUPDATE_SHIFT) |
3073                (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3074
3075
3076        CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3077}
3078
3079static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3080{
3081        struct cnic_local *cp = dev->cnic_priv;
3082
3083        cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3084                           IGU_INT_DISABLE, 0);
3085}
3086
3087static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3088{
3089        struct cnic_local *cp = dev->cnic_priv;
3090
3091        cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3092                        IGU_INT_DISABLE, 0);
3093}
3094
3095static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3096{
3097        struct cnic_local *cp = dev->cnic_priv;
3098
3099        cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3100                           IGU_INT_ENABLE, 1);
3101}
3102
3103static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3104{
3105        struct cnic_local *cp = dev->cnic_priv;
3106
3107        cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3108                        IGU_INT_ENABLE, 1);
3109}
3110
3111static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3112{
3113        u32 last_status = *info->status_idx_ptr;
3114        int kcqe_cnt;
3115
3116        /* status block index must be read before reading the KCQ */
3117        rmb();
3118        while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3119
3120                service_kcqes(dev, kcqe_cnt);
3121
3122                /* Tell compiler that sblk fields can change. */
3123                barrier();
3124
3125                last_status = *info->status_idx_ptr;
3126                /* status block index must be read before reading the KCQ */
3127                rmb();
3128        }
3129        return last_status;
3130}
3131
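    /* Tasklet handler for bnx2x devices.  Services kcq1 and, on FCoE
     * capable devices, kcq2 as well, re-checking until the status block
     * index is stable before re-arming the interrupt.
     */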
3132static void cnic_service_bnx2x_bh(unsigned long data)
3133{
3134        struct cnic_dev *dev = (struct cnic_dev *) data;
3135        struct cnic_local *cp = dev->cnic_priv;
3136        struct bnx2x *bp = netdev_priv(dev->netdev);
3137        u32 status_idx, new_status_idx;
3138
3139        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3140                return;
3141
3142        while (1) {
3143                status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3144
3145                CNIC_WR16(dev, cp->kcq1.io_addr,
3146                          cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3147
3148                if (!CNIC_SUPPORTS_FCOE(bp)) {
3149                        cp->arm_int(dev, status_idx);
3150                        break;
3151                }
3152
3153                new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3154
3155                if (new_status_idx != status_idx)
3156                        continue;
3157
3158                CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3159                          MAX_KCQ_IDX);
3160
3161                cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3162                                status_idx, IGU_INT_ENABLE, 1);
3163
3164                break;
3165        }
3166}
3167
3168static int cnic_service_bnx2x(void *data, void *status_blk)
3169{
3170        struct cnic_dev *dev = data;
3171        struct cnic_local *cp = dev->cnic_priv;
3172
3173        if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3174                cnic_doirq(dev);
3175
3176        cnic_chk_pkt_rings(cp);
3177
3178        return 0;
3179}
3180
3181static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3182{
3183        struct cnic_ulp_ops *ulp_ops;
3184
3185        if (if_type == CNIC_ULP_ISCSI)
3186                cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3187
3188        mutex_lock(&cnic_lock);
3189        ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3190                                            lockdep_is_held(&cnic_lock));
3191        if (!ulp_ops) {
3192                mutex_unlock(&cnic_lock);
3193                return;
3194        }
3195        set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3196        mutex_unlock(&cnic_lock);
3197
3198        if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3199                ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3200
3201        clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3202}
3203
3204static void cnic_ulp_stop(struct cnic_dev *dev)
3205{
3206        struct cnic_local *cp = dev->cnic_priv;
3207        int if_type;
3208
3209        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3210                cnic_ulp_stop_one(cp, if_type);
3211}
3212
3213static void cnic_ulp_start(struct cnic_dev *dev)
3214{
3215        struct cnic_local *cp = dev->cnic_priv;
3216        int if_type;
3217
3218        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3219                struct cnic_ulp_ops *ulp_ops;
3220
3221                mutex_lock(&cnic_lock);
3222                ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3223                                                    lockdep_is_held(&cnic_lock));
3224                if (!ulp_ops || !ulp_ops->cnic_start) {
3225                        mutex_unlock(&cnic_lock);
3226                        continue;
3227                }
3228                set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3229                mutex_unlock(&cnic_lock);
3230
3231                if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3232                        ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3233
3234                clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3235        }
3236}
3237
3238static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3239{
3240        struct cnic_local *cp = dev->cnic_priv;
3241        struct cnic_ulp_ops *ulp_ops;
3242        int rc;
3243
3244        mutex_lock(&cnic_lock);
3245        ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3246                                            lockdep_is_held(&cnic_lock));
3247        if (ulp_ops && ulp_ops->cnic_get_stats)
3248                rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3249        else
3250                rc = -ENODEV;
3251        mutex_unlock(&cnic_lock);
3252        return rc;
3253}
3254
3255static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3256{
3257        struct cnic_dev *dev = data;
3258        int ulp_type = CNIC_ULP_ISCSI;
3259
3260        switch (info->cmd) {
3261        case CNIC_CTL_STOP_CMD:
3262                cnic_hold(dev);
3263
3264                cnic_ulp_stop(dev);
3265                cnic_stop_hw(dev);
3266
3267                cnic_put(dev);
3268                break;
3269        case CNIC_CTL_START_CMD:
3270                cnic_hold(dev);
3271
3272                if (!cnic_start_hw(dev))
3273                        cnic_ulp_start(dev);
3274
3275                cnic_put(dev);
3276                break;
3277        case CNIC_CTL_STOP_ISCSI_CMD: {
3278                struct cnic_local *cp = dev->cnic_priv;
3279                set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3280                queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3281                break;
3282        }
3283        case CNIC_CTL_COMPLETION_CMD: {
3284                struct cnic_ctl_completion *comp = &info->data.comp;
3285                u32 cid = BNX2X_SW_CID(comp->cid);
3286                u32 l5_cid;
3287                struct cnic_local *cp = dev->cnic_priv;
3288
3289                if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3290                        break;
3291
3292                if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3293                        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3294
3295                        if (unlikely(comp->error)) {
3296                                set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3297                                netdev_err(dev->netdev,
3298                                           "CID %x CFC delete comp error %x\n",
3299                                           cid, comp->error);
3300                        }
3301
3302                        ctx->wait_cond = 1;
3303                        wake_up(&ctx->waitq);
3304                }
3305                break;
3306        }
3307        case CNIC_CTL_FCOE_STATS_GET_CMD:
3308                ulp_type = CNIC_ULP_FCOE;
3309                /* fall through */
3310        case CNIC_CTL_ISCSI_STATS_GET_CMD:
3311                cnic_hold(dev);
3312                cnic_copy_ulp_stats(dev, ulp_type);
3313                cnic_put(dev);
3314                break;
3315
3316        default:
3317                return -EINVAL;
3318        }
3319        return 0;
3320}
3321
3322static void cnic_ulp_init(struct cnic_dev *dev)
3323{
3324        int i;
3325        struct cnic_local *cp = dev->cnic_priv;
3326
3327        for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3328                struct cnic_ulp_ops *ulp_ops;
3329
3330                mutex_lock(&cnic_lock);
3331                ulp_ops = cnic_ulp_tbl_prot(i);
3332                if (!ulp_ops || !ulp_ops->cnic_init) {
3333                        mutex_unlock(&cnic_lock);
3334                        continue;
3335                }
3336                ulp_get(ulp_ops);
3337                mutex_unlock(&cnic_lock);
3338
3339                if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3340                        ulp_ops->cnic_init(dev);
3341
3342                ulp_put(ulp_ops);
3343        }
3344}
3345
3346static void cnic_ulp_exit(struct cnic_dev *dev)
3347{
3348        int i;
3349        struct cnic_local *cp = dev->cnic_priv;
3350
3351        for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3352                struct cnic_ulp_ops *ulp_ops;
3353
3354                mutex_lock(&cnic_lock);
3355                ulp_ops = cnic_ulp_tbl_prot(i);
3356                if (!ulp_ops || !ulp_ops->cnic_exit) {
3357                        mutex_unlock(&cnic_lock);
3358                        continue;
3359                }
3360                ulp_get(ulp_ops);
3361                mutex_unlock(&cnic_lock);
3362
3363                if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3364                        ulp_ops->cnic_exit(dev);
3365
3366                ulp_put(ulp_ops);
3367        }
3368}
3369
3370static int cnic_cm_offload_pg(struct cnic_sock *csk)
3371{
3372        struct cnic_dev *dev = csk->dev;
3373        struct l4_kwq_offload_pg *l4kwqe;
3374        struct kwqe *wqes[1];
3375
3376        l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3377        memset(l4kwqe, 0, sizeof(*l4kwqe));
3378        wqes[0] = (struct kwqe *) l4kwqe;
3379
3380        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3381        l4kwqe->flags =
3382                L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3383        l4kwqe->l2hdr_nbytes = ETH_HLEN;
3384
3385        l4kwqe->da0 = csk->ha[0];
3386        l4kwqe->da1 = csk->ha[1];
3387        l4kwqe->da2 = csk->ha[2];
3388        l4kwqe->da3 = csk->ha[3];
3389        l4kwqe->da4 = csk->ha[4];
3390        l4kwqe->da5 = csk->ha[5];
3391
3392        l4kwqe->sa0 = dev->mac_addr[0];
3393        l4kwqe->sa1 = dev->mac_addr[1];
3394        l4kwqe->sa2 = dev->mac_addr[2];
3395        l4kwqe->sa3 = dev->mac_addr[3];
3396        l4kwqe->sa4 = dev->mac_addr[4];
3397        l4kwqe->sa5 = dev->mac_addr[5];
3398
3399        l4kwqe->etype = ETH_P_IP;
3400        l4kwqe->ipid_start = DEF_IPID_START;
3401        l4kwqe->host_opaque = csk->l5_cid;
3402
3403        if (csk->vlan_id) {
3404                l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3405                l4kwqe->vlan_tag = csk->vlan_id;
3406                l4kwqe->l2hdr_nbytes += 4;
3407        }
3408
3409        return dev->submit_kwqes(dev, wqes, 1);
3410}
3411
3412static int cnic_cm_update_pg(struct cnic_sock *csk)
3413{
3414        struct cnic_dev *dev = csk->dev;
3415        struct l4_kwq_update_pg *l4kwqe;
3416        struct kwqe *wqes[1];
3417
3418        l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3419        memset(l4kwqe, 0, sizeof(*l4kwqe));
3420        wqes[0] = (struct kwqe *) l4kwqe;
3421
3422        l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3423        l4kwqe->flags =
3424                L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3425        l4kwqe->pg_cid = csk->pg_cid;
3426
3427        l4kwqe->da0 = csk->ha[0];
3428        l4kwqe->da1 = csk->ha[1];
3429        l4kwqe->da2 = csk->ha[2];
3430        l4kwqe->da3 = csk->ha[3];
3431        l4kwqe->da4 = csk->ha[4];
3432        l4kwqe->da5 = csk->ha[5];
3433
3434        l4kwqe->pg_host_opaque = csk->l5_cid;
3435        l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3436
3437        return dev->submit_kwqes(dev, wqes, 1);
3438}
3439
3440static int cnic_cm_upload_pg(struct cnic_sock *csk)
3441{
3442        struct cnic_dev *dev = csk->dev;
3443        struct l4_kwq_upload *l4kwqe;
3444        struct kwqe *wqes[1];
3445
3446        l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3447        memset(l4kwqe, 0, sizeof(*l4kwqe));
3448        wqes[0] = (struct kwqe *) l4kwqe;
3449
3450        l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3451        l4kwqe->flags =
3452                L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3453        l4kwqe->cid = csk->pg_cid;
3454
3455        return dev->submit_kwqes(dev, wqes, 1);
3456}
3457
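    /* Build the L4 connect request.  IPv4 uses two KWQEs (req1 and req3);
     * IPv6 adds req2 for the remaining three words of each address.  The
     * MSS is derived from the path MTU minus the IP and TCP header sizes.
     */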
3458static int cnic_cm_conn_req(struct cnic_sock *csk)
3459{
3460        struct cnic_dev *dev = csk->dev;
3461        struct l4_kwq_connect_req1 *l4kwqe1;
3462        struct l4_kwq_connect_req2 *l4kwqe2;
3463        struct l4_kwq_connect_req3 *l4kwqe3;
3464        struct kwqe *wqes[3];
3465        u8 tcp_flags = 0;
3466        int num_wqes = 2;
3467
3468        l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3469        l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3470        l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3471        memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3472        memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3473        memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3474
3475        l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3476        l4kwqe3->flags =
3477                L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3478        l4kwqe3->ka_timeout = csk->ka_timeout;
3479        l4kwqe3->ka_interval = csk->ka_interval;
3480        l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3481        l4kwqe3->tos = csk->tos;
3482        l4kwqe3->ttl = csk->ttl;
3483        l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3484        l4kwqe3->pmtu = csk->mtu;
3485        l4kwqe3->rcv_buf = csk->rcv_buf;
3486        l4kwqe3->snd_buf = csk->snd_buf;
3487        l4kwqe3->seed = csk->seed;
3488
3489        wqes[0] = (struct kwqe *) l4kwqe1;
3490        if (test_bit(SK_F_IPV6, &csk->flags)) {
3491                wqes[1] = (struct kwqe *) l4kwqe2;
3492                wqes[2] = (struct kwqe *) l4kwqe3;
3493                num_wqes = 3;
3494
3495                l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3496                l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3497                l4kwqe2->flags =
3498                        L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3499                        L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3500                l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3501                l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3502                l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3503                l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3504                l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3505                l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3506                l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3507                               sizeof(struct tcphdr);
3508        } else {
3509                wqes[1] = (struct kwqe *) l4kwqe3;
3510                l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3511                               sizeof(struct tcphdr);
3512        }
3513
3514        l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3515        l4kwqe1->flags =
3516                (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3517                 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3518        l4kwqe1->cid = csk->cid;
3519        l4kwqe1->pg_cid = csk->pg_cid;
3520        l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3521        l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3522        l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3523        l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3524        if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3525                tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3526        if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3527                tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3528        if (csk->tcp_flags & SK_TCP_NAGLE)
3529                tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3530        if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3531                tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3532        if (csk->tcp_flags & SK_TCP_SACK)
3533                tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3534        if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3535                tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3536
3537        l4kwqe1->tcp_flags = tcp_flags;
3538
3539        return dev->submit_kwqes(dev, wqes, num_wqes);
3540}
3541
3542static int cnic_cm_close_req(struct cnic_sock *csk)
3543{
3544        struct cnic_dev *dev = csk->dev;
3545        struct l4_kwq_close_req *l4kwqe;
3546        struct kwqe *wqes[1];
3547
3548        l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3549        memset(l4kwqe, 0, sizeof(*l4kwqe));
3550        wqes[0] = (struct kwqe *) l4kwqe;
3551
3552        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3553        l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3554        l4kwqe->cid = csk->cid;
3555
3556        return dev->submit_kwqes(dev, wqes, 1);
3557}
3558
3559static int cnic_cm_abort_req(struct cnic_sock *csk)
3560{
3561        struct cnic_dev *dev = csk->dev;
3562        struct l4_kwq_reset_req *l4kwqe;
3563        struct kwqe *wqes[1];
3564
3565        l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3566        memset(l4kwqe, 0, sizeof(*l4kwqe));
3567        wqes[0] = (struct kwqe *) l4kwqe;
3568
3569        l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3570        l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3571        l4kwqe->cid = csk->cid;
3572
3573        return dev->submit_kwqes(dev, wqes, 1);
3574}
3575
3576static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3577                          u32 l5_cid, struct cnic_sock **csk, void *context)
3578{
3579        struct cnic_local *cp = dev->cnic_priv;
3580        struct cnic_sock *csk1;
3581
3582        if (l5_cid >= MAX_CM_SK_TBL_SZ)
3583                return -EINVAL;
3584
3585        if (cp->ctx_tbl) {
3586                struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3587
3588                if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3589                        return -EAGAIN;
3590        }
3591
3592        csk1 = &cp->csk_tbl[l5_cid];
3593        if (atomic_read(&csk1->ref_count))
3594                return -EAGAIN;
3595
3596        if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3597                return -EBUSY;
3598
3599        csk1->dev = dev;
3600        csk1->cid = cid;
3601        csk1->l5_cid = l5_cid;
3602        csk1->ulp_type = ulp_type;
3603        csk1->context = context;
3604
3605        csk1->ka_timeout = DEF_KA_TIMEOUT;
3606        csk1->ka_interval = DEF_KA_INTERVAL;
3607        csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3608        csk1->tos = DEF_TOS;
3609        csk1->ttl = DEF_TTL;
3610        csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3611        csk1->rcv_buf = DEF_RCV_BUF;
3612        csk1->snd_buf = DEF_SND_BUF;
3613        csk1->seed = DEF_SEED;
3614        csk1->tcp_flags = 0;
3615
3616        *csk = csk1;
3617        return 0;
3618}
3619
3620static void cnic_cm_cleanup(struct cnic_sock *csk)
3621{
3622        if (csk->src_port) {
3623                struct cnic_dev *dev = csk->dev;
3624                struct cnic_local *cp = dev->cnic_priv;
3625
3626                cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3627                csk->src_port = 0;
3628        }
3629}
3630
3631static void cnic_close_conn(struct cnic_sock *csk)
3632{
3633        if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3634                cnic_cm_upload_pg(csk);
3635                clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3636        }
3637        cnic_cm_cleanup(csk);
3638}
3639
3640static int cnic_cm_destroy(struct cnic_sock *csk)
3641{
3642        if (!cnic_in_use(csk))
3643                return -EINVAL;
3644
3645        csk_hold(csk);
3646        clear_bit(SK_F_INUSE, &csk->flags);
3647        smp_mb__after_atomic();
3648        while (atomic_read(&csk->ref_count) != 1)
3649                msleep(1);
3650        cnic_cm_cleanup(csk);
3651
3652        csk->flags = 0;
3653        csk_put(csk);
3654        return 0;
3655}
3656
3657static inline u16 cnic_get_vlan(struct net_device *dev,
3658                                struct net_device **vlan_dev)
3659{
3660        if (dev->priv_flags & IFF_802_1Q_VLAN) {
3661                *vlan_dev = vlan_dev_real_dev(dev);
3662                return vlan_dev_vlan_id(dev);
3663        }
3664        *vlan_dev = dev;
3665        return 0;
3666}
3667
3668static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3669                             struct dst_entry **dst)
3670{
3671#if defined(CONFIG_INET)
3672        struct rtable *rt;
3673
3674        rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3675        if (!IS_ERR(rt)) {
3676                *dst = &rt->dst;
3677                return 0;
3678        }
3679        return PTR_ERR(rt);
3680#else
3681        return -ENETUNREACH;
3682#endif
3683}
3684
3685static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3686                             struct dst_entry **dst)
3687{
3688#if IS_ENABLED(CONFIG_IPV6)
3689        struct flowi6 fl6;
3690
3691        memset(&fl6, 0, sizeof(fl6));
3692        fl6.daddr = dst_addr->sin6_addr;
3693        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3694                fl6.flowi6_oif = dst_addr->sin6_scope_id;
3695
3696        *dst = ip6_route_output(&init_net, NULL, &fl6);
3697        if ((*dst)->error) {
3698                dst_release(*dst);
3699                *dst = NULL;
3700                return -ENETUNREACH;
3701        } else
3702                return 0;
3703#endif
3704
3705        return -ENETUNREACH;
3706}
3707
3708static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3709                                           int ulp_type)
3710{
3711        struct cnic_dev *dev = NULL;
3712        struct dst_entry *dst;
3713        struct net_device *netdev = NULL;
3714        int err = -ENETUNREACH;
3715
3716        if (dst_addr->sin_family == AF_INET)
3717                err = cnic_get_v4_route(dst_addr, &dst);
3718        else if (dst_addr->sin_family == AF_INET6) {
3719                struct sockaddr_in6 *dst_addr6 =
3720                        (struct sockaddr_in6 *) dst_addr;
3721
3722                err = cnic_get_v6_route(dst_addr6, &dst);
3723        } else
3724                return NULL;
3725
3726        if (err)
3727                return NULL;
3728
3729        if (!dst->dev)
3730                goto done;
3731
3732        cnic_get_vlan(dst->dev, &netdev);
3733
3734        dev = cnic_from_netdev(netdev);
3735
3736done:
3737        dst_release(dst);
3738        if (dev)
3739                cnic_put(dev);
3740        return dev;
3741}
3742
3743static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3744{
3745        struct cnic_dev *dev = csk->dev;
3746        struct cnic_local *cp = dev->cnic_priv;
3747
3748        return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3749}
3750
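    /* Resolve the route for a new connection and reserve a local port.
     * A caller-supplied port in the CNIC_LOCAL_PORT_MIN..MAX range is
     * claimed from csk_port_tbl; otherwise a free port is allocated from
     * the table.  The vlan id and MTU are taken from the route when it
     * resolves to our own netdev.
     */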
3751static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3752{
3753        struct cnic_dev *dev = csk->dev;
3754        struct cnic_local *cp = dev->cnic_priv;
3755        int is_v6, rc = 0;
3756        struct dst_entry *dst = NULL;
3757        struct net_device *realdev;
3758        __be16 local_port;
3759        u32 port_id;
3760
3761        if (saddr->local.v6.sin6_family == AF_INET6 &&
3762            saddr->remote.v6.sin6_family == AF_INET6)
3763                is_v6 = 1;
3764        else if (saddr->local.v4.sin_family == AF_INET &&
3765                 saddr->remote.v4.sin_family == AF_INET)
3766                is_v6 = 0;
3767        else
3768                return -EINVAL;
3769
3770        clear_bit(SK_F_IPV6, &csk->flags);
3771
3772        if (is_v6) {
3773                set_bit(SK_F_IPV6, &csk->flags);
3774                cnic_get_v6_route(&saddr->remote.v6, &dst);
3775
3776                memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3777                       sizeof(struct in6_addr));
3778                csk->dst_port = saddr->remote.v6.sin6_port;
3779                local_port = saddr->local.v6.sin6_port;
3780
3781        } else {
3782                cnic_get_v4_route(&saddr->remote.v4, &dst);
3783
3784                csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3785                csk->dst_port = saddr->remote.v4.sin_port;
3786                local_port = saddr->local.v4.sin_port;
3787        }
3788
3789        csk->vlan_id = 0;
3790        csk->mtu = dev->netdev->mtu;
3791        if (dst && dst->dev) {
3792                u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3793                if (realdev == dev->netdev) {
3794                        csk->vlan_id = vlan;
3795                        csk->mtu = dst_mtu(dst);
3796                }
3797        }
3798
3799        port_id = be16_to_cpu(local_port);
3800        if (port_id >= CNIC_LOCAL_PORT_MIN &&
3801            port_id < CNIC_LOCAL_PORT_MAX) {
3802                if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3803                        port_id = 0;
3804        } else
3805                port_id = 0;
3806
3807        if (!port_id) {
3808                port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3809                if (port_id == -1) {
3810                        rc = -ENOMEM;
3811                        goto err_out;
3812                }
3813                local_port = cpu_to_be16(port_id);
3814        }
3815        csk->src_port = local_port;
3816
3817err_out:
3818        dst_release(dst);
3819        return rc;
3820}
3821
3822static void cnic_init_csk_state(struct cnic_sock *csk)
3823{
3824        csk->state = 0;
3825        clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3826        clear_bit(SK_F_CLOSING, &csk->flags);
3827}
3828
3829static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3830{
3831        struct cnic_local *cp = csk->dev->cnic_priv;
3832        int err = 0;
3833
3834        if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3835                return -EOPNOTSUPP;
3836
3837        if (!cnic_in_use(csk))
3838                return -EINVAL;
3839
3840        if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3841                return -EINVAL;
3842
3843        cnic_init_csk_state(csk);
3844
3845        err = cnic_get_route(csk, saddr);
3846        if (err)
3847                goto err_out;
3848
3849        err = cnic_resolve_addr(csk, saddr);
3850        if (!err)
3851                return 0;
3852
3853err_out:
3854        clear_bit(SK_F_CONNECT_START, &csk->flags);
3855        return err;
3856}
3857
3858static int cnic_cm_abort(struct cnic_sock *csk)
3859{
3860        struct cnic_local *cp = csk->dev->cnic_priv;
3861        u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3862
3863        if (!cnic_in_use(csk))
3864                return -EINVAL;
3865
3866        if (cnic_abort_prep(csk))
3867                return cnic_cm_abort_req(csk);
3868
3869        /* Getting here means that we haven't started connect, or
3870         * connect was not successful, or it has been reset by the target.
3871         */
3872
3873        cp->close_conn(csk, opcode);
3874        if (csk->state != opcode) {
3875                /* Wait for remote reset sequence to complete */
3876                while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3877                        msleep(1);
3878
3879                return -EALREADY;
3880        }
3881
3882        return 0;
3883}
3884
3885static int cnic_cm_close(struct cnic_sock *csk)
3886{
3887        if (!cnic_in_use(csk))
3888                return -EINVAL;
3889
3890        if (cnic_close_prep(csk)) {
3891                csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3892                return cnic_cm_close_req(csk);
3893        } else {
3894                /* Wait for remote reset sequence to complete */
3895                while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3896                        msleep(1);
3897
3898                return -EALREADY;
3899        }
3900        return 0;
3901}
3902
3903static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3904                           u8 opcode)
3905{
3906        struct cnic_ulp_ops *ulp_ops;
3907        int ulp_type = csk->ulp_type;
3908
3909        rcu_read_lock();
3910        ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3911        if (ulp_ops) {
3912                if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3913                        ulp_ops->cm_connect_complete(csk);
3914                else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3915                        ulp_ops->cm_close_complete(csk);
3916                else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3917                        ulp_ops->cm_remote_abort(csk);
3918                else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3919                        ulp_ops->cm_abort_complete(csk);
3920                else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3921                        ulp_ops->cm_remote_close(csk);
3922        }
3923        rcu_read_unlock();
3924}
3925
3926static int cnic_cm_set_pg(struct cnic_sock *csk)
3927{
3928        if (cnic_offld_prep(csk)) {
3929                if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3930                        cnic_cm_update_pg(csk);
3931                else
3932                        cnic_cm_offload_pg(csk);
3933        }
3934        return 0;
3935}
3936
3937static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3938{
3939        struct cnic_local *cp = dev->cnic_priv;
3940        u32 l5_cid = kcqe->pg_host_opaque;
3941        u8 opcode = kcqe->op_code;
3942        struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3943
3944        csk_hold(csk);
3945        if (!cnic_in_use(csk))
3946                goto done;
3947
3948        if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3949                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3950                goto done;
3951        }
3952        /* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3953        if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3954                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3955                cnic_cm_upcall(cp, csk,
3956                               L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3957                goto done;
3958        }
3959
3960        csk->pg_cid = kcqe->pg_cid;
3961        set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3962        cnic_cm_conn_req(csk);
3963
3964done:
3965        csk_put(csk);
3966}
3967
3968static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3969{
3970        struct cnic_local *cp = dev->cnic_priv;
3971        struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3972        u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3973        struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3974
3975        ctx->timestamp = jiffies;
3976        ctx->wait_cond = 1;
3977        wake_up(&ctx->waitq);
3978}
3979
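    /* Dispatch an L4/L5 KCQE to its connection.  PG offload/update and
     * FCoE terminate completions are handled separately; for opcodes with
     * bit 0x80 set, the l5_cid comes from the cid field rather than
     * conn_id.
     */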
3980static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3981{
3982        struct cnic_local *cp = dev->cnic_priv;
3983        struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3984        u8 opcode = l4kcqe->op_code;
3985        u32 l5_cid;
3986        struct cnic_sock *csk;
3987
3988        if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3989                cnic_process_fcoe_term_conn(dev, kcqe);
3990                return;
3991        }
3992        if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3993            opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3994                cnic_cm_process_offld_pg(dev, l4kcqe);
3995                return;
3996        }
3997
3998        l5_cid = l4kcqe->conn_id;
3999        if (opcode & 0x80)
4000                l5_cid = l4kcqe->cid;
4001        if (l5_cid >= MAX_CM_SK_TBL_SZ)
4002                return;
4003
4004        csk = &cp->csk_tbl[l5_cid];
4005        csk_hold(csk);
4006
4007        if (!cnic_in_use(csk)) {
4008                csk_put(csk);
4009                return;
4010        }
4011
4012        switch (opcode) {
4013        case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4014                if (l4kcqe->status != 0) {
4015                        clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4016                        cnic_cm_upcall(cp, csk,
4017                                       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4018                }
4019                break;
4020        case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4021                if (l4kcqe->status == 0)
4022                        set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4023                else if (l4kcqe->status ==
4024                         L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4025                        set_bit(SK_F_HW_ERR, &csk->flags);
4026
4027                smp_mb__before_atomic();
4028                clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4029                cnic_cm_upcall(cp, csk, opcode);
4030                break;
4031
4032        case L5CM_RAMROD_CMD_ID_CLOSE: {
4033                struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4034
4035                if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
4036                        netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4037                                    l4kcqe->status, l5kcqe->completion_status);
4038                        opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4039                        /* Fall through */
4040                } else {
4041                        break;
4042                }
4043        }
4044        case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4045        case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4046        case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4047        case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4048        case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4049                if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4050                        set_bit(SK_F_HW_ERR, &csk->flags);
4051
4052                cp->close_conn(csk, opcode);
4053                break;
4054
4055        case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4056                /* after we already sent CLOSE_REQ */
4057                if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4058                    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4059                    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4060                        cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4061                else
4062                        cnic_cm_upcall(cp, csk, opcode);
4063                break;
4064        }
4065        csk_put(csk);
4066}
4067
4068static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4069{
4070        struct cnic_dev *dev = data;
4071        int i;
4072
4073        for (i = 0; i < num; i++)
4074                cnic_cm_process_kcqe(dev, kcqe[i]);
4075}
4076
4077static struct cnic_ulp_ops cm_ulp_ops = {
4078        .indicate_kcqes         = cnic_cm_indicate_kcqe,
4079};
4080
4081static void cnic_cm_free_mem(struct cnic_dev *dev)
4082{
4083        struct cnic_local *cp = dev->cnic_priv;
4084
4085        kfree(cp->csk_tbl);
4086        cp->csk_tbl = NULL;
4087        cnic_free_id_tbl(&cp->csk_port_tbl);
4088}
4089
4090static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4091{
4092        struct cnic_local *cp = dev->cnic_priv;
4093        u32 port_id;
4094
4095        cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4096                              GFP_KERNEL);
4097        if (!cp->csk_tbl)
4098                return -ENOMEM;
4099
4100        port_id = prandom_u32();
4101        port_id %= CNIC_LOCAL_PORT_RANGE;
4102        if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4103                             CNIC_LOCAL_PORT_MIN, port_id)) {
4104                cnic_cm_free_mem(dev);
4105                return -ENOMEM;
4106        }
4107        return 0;
4108}
4109
4110static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4111{
4112        if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4113                /* Unsolicited RESET_COMP or RESET_RECEIVED */
4114                opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4115                csk->state = opcode;
4116        }
4117
4118        /* 1. If event opcode matches the expected event in csk->state
4119         * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4120         *    event
4121         * 3. If the expected event is 0, meaning the connection was never
4122         *    established, we accept the opcode from cm_abort.
4123         */
4124        if (opcode == csk->state || csk->state == 0 ||
4125            csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4126            csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4127                if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4128                        if (csk->state == 0)
4129                                csk->state = opcode;
4130                        return 1;
4131                }
4132        }
4133        return 0;
4134}
4135
4136static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4137{
4138        struct cnic_dev *dev = csk->dev;
4139        struct cnic_local *cp = dev->cnic_priv;
4140
4141        if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4142                cnic_cm_upcall(cp, csk, opcode);
4143                return;
4144        }
4145
4146        clear_bit(SK_F_CONNECT_START, &csk->flags);
4147        cnic_close_conn(csk);
4148        csk->state = opcode;
4149        cnic_cm_upcall(cp, csk, opcode);
4150}
4151
4152static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4153{
4154}
4155
4156static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4157{
4158        u32 seed;
4159
4160        seed = prandom_u32();
4161        cnic_ctx_wr(dev, 45, 0, seed);
4162        return 0;
4163}
4164
4165static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4166{
4167        struct cnic_dev *dev = csk->dev;
4168        struct cnic_local *cp = dev->cnic_priv;
4169        struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4170        union l5cm_specific_data l5_data;
4171        u32 cmd = 0;
4172        int close_complete = 0;
4173
4174        switch (opcode) {
4175        case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4176        case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4177        case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4178                if (cnic_ready_to_close(csk, opcode)) {
4179                        if (test_bit(SK_F_HW_ERR, &csk->flags))
4180                                close_complete = 1;
4181                        else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4182                                cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4183                        else
4184                                close_complete = 1;
4185                }
4186                break;
4187        case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4188                cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4189                break;
4190        case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4191                close_complete = 1;
4192                break;
4193        }
4194        if (cmd) {
4195                memset(&l5_data, 0, sizeof(l5_data));
4196
4197                cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4198                                    &l5_data);
4199        } else if (close_complete) {
4200                ctx->timestamp = jiffies;
4201                cnic_close_conn(csk);
4202                cnic_cm_upcall(cp, csk, csk->state);
4203        }
4204}
4205
4206static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4207{
4208        struct cnic_local *cp = dev->cnic_priv;
4209
4210        if (!cp->ctx_tbl)
4211                return;
4212
4213        if (!netif_running(dev->netdev))
4214                return;
4215
4216        cnic_bnx2x_delete_wait(dev, 0);
4217
4218        cancel_delayed_work(&cp->delete_task);
4219        flush_workqueue(cnic_wq);
4220
4221        if (atomic_read(&cp->iscsi_conn) != 0)
4222                netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4223                            atomic_read(&cp->iscsi_conn));
4224}
4225
4226static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4227{
4228        struct bnx2x *bp = netdev_priv(dev->netdev);
4229        u32 pfid = bp->pfid;
4230        u32 port = BP_PORT(bp);
4231
4232        cnic_init_bnx2x_mac(dev);
4233        cnic_bnx2x_set_tcp_options(dev, 0, 1);
4234
4235        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4236                  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4237
4238        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4239                XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4240        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4241                XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4242                DEF_MAX_DA_COUNT);
4243
4244        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4245                 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4246        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4247                 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4248        CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4249                 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4250        CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4251                XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4252
4253        CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4254                DEF_MAX_CWND);
4255        return 0;
4256}
4257
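    /* Delayed work that finishes connection teardown.  Contexts marked
     * CTX_FL_DELETE_WAIT are given two seconds from their last timestamp
     * before the destroy ramrod is issued; the work requeues itself every
     * 10 ms while any context is still waiting.
     */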
4258static void cnic_delete_task(struct work_struct *work)
4259{
4260        struct cnic_local *cp;
4261        struct cnic_dev *dev;
4262        u32 i;
4263        int need_resched = 0;
4264
4265        cp = container_of(work, struct cnic_local, delete_task.work);
4266        dev = cp->dev;
4267
4268        if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4269                struct drv_ctl_info info;
4270
4271                cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4272
4273                info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4274                cp->ethdev->drv_ctl(dev->netdev, &info);
4275        }
4276
4277        for (i = 0; i < cp->max_cid_space; i++) {
4278                struct cnic_context *ctx = &cp->ctx_tbl[i];
4279                int err;
4280
4281                if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4282                    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4283                        continue;
4284
4285                if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4286                        need_resched = 1;
4287                        continue;
4288                }
4289
4290                if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4291                        continue;
4292
4293                err = cnic_bnx2x_destroy_ramrod(dev, i);
4294
4295                cnic_free_bnx2x_conn_resc(dev, i);
4296                if (!err) {
4297                        if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4298                                atomic_dec(&cp->iscsi_conn);
4299
4300                        clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4301                }
4302        }
4303
4304        if (need_resched)
4305                queue_delayed_work(cnic_wq, &cp->delete_task,
4306                                   msecs_to_jiffies(10));
4307
4308}
4309
4310static int cnic_cm_open(struct cnic_dev *dev)
4311{
4312        struct cnic_local *cp = dev->cnic_priv;
4313        int err;
4314
4315        err = cnic_cm_alloc_mem(dev);
4316        if (err)
4317                return err;
4318
4319        err = cp->start_cm(dev);
4320
4321        if (err)
4322                goto err_out;
4323
4324        INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4325
4326        dev->cm_create = cnic_cm_create;
4327        dev->cm_destroy = cnic_cm_destroy;
4328        dev->cm_connect = cnic_cm_connect;
4329        dev->cm_abort = cnic_cm_abort;
4330        dev->cm_close = cnic_cm_close;
4331        dev->cm_select_dev = cnic_cm_select_dev;
4332
4333        cp->ulp_handle[CNIC_ULP_L4] = dev;
4334        rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4335        return 0;
4336
4337err_out:
4338        cnic_cm_free_mem(dev);
4339        return err;
4340}
4341
4342static int cnic_cm_shutdown(struct cnic_dev *dev)
4343{
4344        struct cnic_local *cp = dev->cnic_priv;
4345        int i;
4346
4347        if (!cp->csk_tbl)
4348                return 0;
4349
4350        for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4351                struct cnic_sock *csk = &cp->csk_tbl[i];
4352
4353                clear_bit(SK_F_INUSE, &csk->flags);
4354                cnic_cm_cleanup(csk);
4355        }
4356        cnic_cm_free_mem(dev);
4357
4358        return 0;
4359}
4360
4361static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4362{
4363        u32 cid_addr;
4364        int i;
4365
4366        cid_addr = GET_CID_ADDR(cid);
4367
4368        for (i = 0; i < CTX_SIZE; i += 4)
4369                cnic_ctx_wr(dev, cid_addr, i, 0);
4370}
4371
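    /* Program (or invalidate) the host page table for 5709 context memory.
     * Each context block address is written through the HOST_PAGE_TBL
     * registers, polling for the hardware to clear the WRITE_REQ bit.
     */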
4372static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4373{
4374        struct cnic_local *cp = dev->cnic_priv;
4375        int ret = 0, i;
4376        u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4377
4378        if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4379                return 0;
4380
4381        for (i = 0; i < cp->ctx_blks; i++) {
4382                int j;
4383                u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4384                u32 val;
4385
4386                memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4387
4388                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4389                        (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4390                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4391                        (u64) cp->ctx_arr[i].mapping >> 32);
4392                CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4393                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4394                for (j = 0; j < 10; j++) {
4395
4396                        val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4397                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4398                                break;
4399                        udelay(5);
4400                }
4401                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4402                        ret = -EBUSY;
4403                        break;
4404                }
4405        }
4406        return ret;
4407}
4408
4409static void cnic_free_irq(struct cnic_dev *dev)
4410{
4411        struct cnic_local *cp = dev->cnic_priv;
4412        struct cnic_eth_dev *ethdev = cp->ethdev;
4413
4414        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4415                cp->disable_int_sync(dev);
4416                tasklet_kill(&cp->cnic_irq_task);
4417                free_irq(ethdev->irq_arr[0].vector, dev);
4418        }
4419}
4420
4421static int cnic_request_irq(struct cnic_dev *dev)
4422{
4423        struct cnic_local *cp = dev->cnic_priv;
4424        struct cnic_eth_dev *ethdev = cp->ethdev;
4425        int err;
4426
4427        err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4428        if (err)
4429                tasklet_disable(&cp->cnic_irq_task);
4430
4431        return err;
4432}
4433
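    /* Set up KCQ interrupt handling for bnx2 devices.  In MSI-X mode a
     * dedicated status block is configured for one-shot mode and the
     * tasklet/IRQ are installed.  In either mode coalesce-now commands are
     * issued and -EBUSY is returned if the completion producer index does
     * not return to zero.
     */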
4434static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4435{
4436        struct cnic_local *cp = dev->cnic_priv;
4437        struct cnic_eth_dev *ethdev = cp->ethdev;
4438
4439        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4440                int err, i = 0;
4441                int sblk_num = cp->status_blk_num;
4442                u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4443                           BNX2_HC_SB_CONFIG_1;
4444
4445                CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4446
4447                CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4448                CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4449                CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4450
4451                cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4452                tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4453                             (unsigned long) dev);
4454                err = cnic_request_irq(dev);
4455                if (err)
4456                        return err;
4457
4458                while (cp->status_blk.bnx2->status_completion_producer_index &&
4459                       i < 10) {
4460                        CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4461                                1 << (11 + sblk_num));
4462                        udelay(10);
4463                        i++;
4464                        barrier();
4465                }
4466                if (cp->status_blk.bnx2->status_completion_producer_index) {
4467                        cnic_free_irq(dev);
4468                        goto failed;
4469                }
4470
4471        } else {
4472                struct status_block *sblk = cp->status_blk.gen;
4473                u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4474                int i = 0;
4475
4476                while (sblk->status_completion_producer_index && i < 10) {
4477                        CNIC_WR(dev, BNX2_HC_COMMAND,
4478                                hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4479                        udelay(10);
4480                        i++;
4481                        barrier();
4482                }
4483                if (sblk->status_completion_producer_index)
4484                        goto failed;
4485
4486        }
4487        return 0;
4488
4489failed:
4490        netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4491        return -EBUSY;
4492}
4493
4494static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4495{
4496        struct cnic_local *cp = dev->cnic_priv;
4497        struct cnic_eth_dev *ethdev = cp->ethdev;
4498
4499        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4500                return;
4501
4502        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4503                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4504}
4505
4506static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4507{
4508        struct cnic_local *cp = dev->cnic_priv;
4509        struct cnic_eth_dev *ethdev = cp->ethdev;
4510
4511        if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4512                return;
4513
4514        CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4515                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4516        CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4517        synchronize_irq(ethdev->irq_arr[0].vector);
4518}
4519
4520static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4521{
4522        struct cnic_local *cp = dev->cnic_priv;
4523        struct cnic_eth_dev *ethdev = cp->ethdev;
4524        struct cnic_uio_dev *udev = cp->udev;
4525        u32 cid_addr, tx_cid, sb_id;
4526        u32 val, offset0, offset1, offset2, offset3;
4527        int i;
4528        struct bnx2_tx_bd *txbd;
4529        dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4530        struct status_block *s_blk = cp->status_blk.gen;
4531
4532        sb_id = cp->status_blk_num;
4533        tx_cid = 20;
4534        cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4535        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4536                struct status_block_msix *sblk = cp->status_blk.bnx2;
4537
4538                tx_cid = TX_TSS_CID + sb_id - 1;
4539                CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4540                        (TX_TSS_CID << 7));
4541                cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4542        }
4543        cp->tx_cons = *cp->tx_cons_ptr;
4544
4545        cid_addr = GET_CID_ADDR(tx_cid);
4546        if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4547                u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4548
4549                for (i = 0; i < PHY_CTX_SIZE; i += 4)
4550                        cnic_ctx_wr(dev, cid_addr2, i, 0);
4551
4552                offset0 = BNX2_L2CTX_TYPE_XI;
4553                offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4554                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4555                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4556        } else {
4557                cnic_init_context(dev, tx_cid);
4558                cnic_init_context(dev, tx_cid + 1);
4559
4560                offset0 = BNX2_L2CTX_TYPE;
4561                offset1 = BNX2_L2CTX_CMD_TYPE;
4562                offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4563                offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4564        }
4565        val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4566        cnic_ctx_wr(dev, cid_addr, offset0, val);
4567
4568        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4569        cnic_ctx_wr(dev, cid_addr, offset1, val);
4570
4571        txbd = udev->l2_ring;
4572
4573        buf_map = udev->l2_buf_map;
4574        for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4575                txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4576                txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4577        }
4578        val = (u64) ring_map >> 32;
4579        cnic_ctx_wr(dev, cid_addr, offset2, val);
4580        txbd->tx_bd_haddr_hi = val;
4581
4582        val = (u64) ring_map & 0xffffffff;
4583        cnic_ctx_wr(dev, cid_addr, offset3, val);
4584        txbd->tx_bd_haddr_lo = val;
4585}
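/*
 * The ring and buffer base addresses above are programmed as two 32-bit
 * halves.  A minimal sketch of that split follows; it is illustrative only
 * (the helper name is made up and the block is not compiled).  For instance,
 * ring_map == 0x0000000123456000 yields hi == 0x00000001 and
 * lo == 0x23456000.
 */
#if 0	/* illustrative sketch only, not part of the driver */
static inline void cnic_example_split_dma(dma_addr_t map, u32 *hi, u32 *lo)
{
	*hi = (u64) map >> 32;		/* upper 32 bits of the DMA address */
	*lo = (u64) map & 0xffffffff;	/* lower 32 bits of the DMA address */
}
#endif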
4586
4587static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4588{
4589        struct cnic_local *cp = dev->cnic_priv;
4590        struct cnic_eth_dev *ethdev = cp->ethdev;
4591        struct cnic_uio_dev *udev = cp->udev;
4592        u32 cid_addr, sb_id, val, coal_reg, coal_val;
4593        int i;
4594        struct bnx2_rx_bd *rxbd;
4595        struct status_block *s_blk = cp->status_blk.gen;
4596        dma_addr_t ring_map = udev->l2_ring_map;
4597
4598        sb_id = cp->status_blk_num;
4599        cnic_init_context(dev, 2);
4600        cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4601        coal_reg = BNX2_HC_COMMAND;
4602        coal_val = CNIC_RD(dev, coal_reg);
4603        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4604                struct status_block_msix *sblk = cp->status_blk.bnx2;
4605
4606                cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4607                coal_reg = BNX2_HC_COALESCE_NOW;
4608                coal_val = 1 << (11 + sb_id);
4609        }
4610        i = 0;
4611        while (*cp->rx_cons_ptr == 0 && i < 10) {
4612                CNIC_WR(dev, coal_reg, coal_val);
4613                udelay(10);
4614                i++;
4615                barrier();
4616        }
4617        cp->rx_cons = *cp->rx_cons_ptr;
4618
4619        cid_addr = GET_CID_ADDR(2);
4620        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4621              BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4622        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4623
4624        if (sb_id == 0)
4625                val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4626        else
4627                val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4628        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4629
4630        rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4631        for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4632                dma_addr_t buf_map;
4633                int n = (i % cp->l2_rx_ring_size) + 1;
4634
4635                buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4636                rxbd->rx_bd_len = cp->l2_single_buf_size;
4637                rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4638                rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4639                rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4640        }
4641        val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4642        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4643        rxbd->rx_bd_haddr_hi = val;
4644
4645        val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4646        cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4647        rxbd->rx_bd_haddr_lo = val;
4648
4649        val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4650        cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4651}
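/*
 * Each RX BD above points at buffer n = (i % l2_rx_ring_size) + 1 within the
 * UIO buffer area, so a small pool of l2_rx_ring_size buffers (3 by default,
 * see cnic_alloc_dev() below) is reused around the whole descriptor ring.
 * The "+ 1" appears to skip buffer 0, which the TX ring above uses for every
 * TX BD.
 */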
4652
4653static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4654{
4655        struct kwqe *wqes[1], l2kwqe;
4656
4657        memset(&l2kwqe, 0, sizeof(l2kwqe));
4658        wqes[0] = &l2kwqe;
4659        l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4660                              (L2_KWQE_OPCODE_VALUE_FLUSH <<
4661                               KWQE_OPCODE_SHIFT) | 2;
4662        dev->submit_kwqes(dev, wqes, 1);
4663}
4664
4665static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4666{
4667        struct cnic_local *cp = dev->cnic_priv;
4668        u32 val;
4669
4670        val = cp->func << 2;
4671
4672        cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4673
4674        val = cnic_reg_rd_ind(dev, cp->shmem_base +
4675                              BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4676        dev->mac_addr[0] = (u8) (val >> 8);
4677        dev->mac_addr[1] = (u8) val;
4678
4679        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4680
4681        val = cnic_reg_rd_ind(dev, cp->shmem_base +
4682                              BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4683        dev->mac_addr[2] = (u8) (val >> 24);
4684        dev->mac_addr[3] = (u8) (val >> 16);
4685        dev->mac_addr[4] = (u8) (val >> 8);
4686        dev->mac_addr[5] = (u8) val;
4687
4688        CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4689
4690        val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4691        if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4692                val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4693
4694        CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4695        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4696        CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4697}
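/*
 * The iSCSI MAC address is recovered from two 32-bit shared-memory words:
 * the "upper" word carries bytes 0-1 and the "lower" word bytes 2-5, and the
 * same words are written to the EMAC perfect-match registers.  A sketch with
 * made-up example values (the helper name is hypothetical and the block is
 * not compiled):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static void cnic_example_mac_from_shmem(u8 *mac)
{
	u32 upper = 0x00000a1b;		/* ISCSI_MAC_UPPER word */
	u32 lower = 0x2c3d4e5f;		/* ISCSI_MAC_LOWER word */

	mac[0] = (u8) (upper >> 8);	/* 0x0a */
	mac[1] = (u8) upper;		/* 0x1b */
	mac[2] = (u8) (lower >> 24);	/* 0x2c */
	mac[3] = (u8) (lower >> 16);	/* 0x3d */
	mac[4] = (u8) (lower >> 8);	/* 0x4e */
	mac[5] = (u8) lower;		/* 0x5f */
}
#endif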
4698
4699static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4700{
4701        struct cnic_local *cp = dev->cnic_priv;
4702        struct cnic_eth_dev *ethdev = cp->ethdev;
4703        struct status_block *sblk = cp->status_blk.gen;
4704        u32 val, kcq_cid_addr, kwq_cid_addr;
4705        int err;
4706
4707        cnic_set_bnx2_mac(dev);
4708
4709        val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4710        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4711        if (CNIC_PAGE_BITS > 12)
4712                val |= (12 - 8)  << 4;
4713        else
4714                val |= (CNIC_PAGE_BITS - 8)  << 4;
4715
4716        CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4717
4718        CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4719        CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4720        CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4721
4722        err = cnic_setup_5709_context(dev, 1);
4723        if (err)
4724                return err;
4725
4726        cnic_init_context(dev, KWQ_CID);
4727        cnic_init_context(dev, KCQ_CID);
4728
4729        kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4730        cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4731
4732        cp->max_kwq_idx = MAX_KWQ_IDX;
4733        cp->kwq_prod_idx = 0;
4734        cp->kwq_con_idx = 0;
4735        set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4736
4737        if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4738                cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4739        else
4740                cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4741
4742        /* Initialize the kernel work queue context. */
4743        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4744              (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4745        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4746
4747        val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4748        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4749
4750        val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4751        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4752
4753        val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4754        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4755
4756        val = (u32) cp->kwq_info.pgtbl_map;
4757        cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4758
4759        kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4760        cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4761
4762        cp->kcq1.sw_prod_idx = 0;
4763        cp->kcq1.hw_prod_idx_ptr =
4764                &sblk->status_completion_producer_index;
4765
4766        cp->kcq1.status_idx_ptr = &sblk->status_idx;
4767
4768        /* Initialize the kernel complete queue context. */
4769        val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4770              (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4771        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4772
4773        val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4774        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4775
4776        val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4777        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4778
4779        val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4780        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4781
4782        val = (u32) cp->kcq1.dma.pgtbl_map;
4783        cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4784
4785        cp->int_num = 0;
4786        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4787                struct status_block_msix *msblk = cp->status_blk.bnx2;
4788                u32 sb_id = cp->status_blk_num;
4789                u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4790
4791                cp->kcq1.hw_prod_idx_ptr =
4792                        &msblk->status_completion_producer_index;
4793                cp->kcq1.status_idx_ptr = &msblk->status_idx;
4794                cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4795                cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4796                cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4797                cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4798        }
4799
4800        /* Enable Command Scheduler notification when we write to the
4801         * host producer index of the kernel contexts. */
4802        CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4803
4804        /* Enable Command Scheduler notification when we write to either
4805         * the Send Queue or Receive Queue producer indexes of the kernel
4806         * bypass contexts. */
4807        CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4808        CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4809
4810        /* Notify COM when the driver posts an application buffer. */
4811        CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4812
4813        /* Set the CP and COM doorbells.  These two processors poll the
4814         * doorbell for a non-zero value before running.  This must be done
4815         * after setting up the kernel queue contexts. */
4816        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4817        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4818
4819        cnic_init_bnx2_tx_ring(dev);
4820        cnic_init_bnx2_rx_ring(dev);
4821
4822        err = cnic_init_bnx2_irq(dev);
4823        if (err) {
4824                netdev_err(dev->netdev, "cnic_init_bnx2_irq failed\n");
4825                cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4826                cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4827                return err;
4828        }
4829
4830        ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4831
4832        return 0;
4833}
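/*
 * Bring-up order in cnic_start_bnx2_hw(): read the iSCSI MAC from shared
 * memory, size the MQ kernel-bypass block, set host-coalescing thresholds,
 * set up the 5709 context memory, initialize the KWQ and KCQ contexts
 * (rewired to the MSI-X status block when one is used), unmask the MQ
 * kernel-queue doorbells, ring the CP and COM scratchpad doorbells, build
 * the L2 TX/RX rings and finally hook up the IRQ.  On IRQ failure the
 * scratchpad doorbells are cleared again so those processors stay idle.
 */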
4834
4835static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4836{
4837        struct cnic_local *cp = dev->cnic_priv;
4838        struct cnic_eth_dev *ethdev = cp->ethdev;
4839        u32 start_offset = ethdev->ctx_tbl_offset;
4840        int i;
4841
4842        for (i = 0; i < cp->ctx_blks; i++) {
4843                struct cnic_ctx *ctx = &cp->ctx_arr[i];
4844                dma_addr_t map = ctx->mapping;
4845
4846                if (cp->ctx_align) {
4847                        unsigned long mask = cp->ctx_align - 1;
4848
4849                        map = (map + mask) & ~mask;
4850                }
4851
4852                cnic_ctx_tbl_wr(dev, start_offset + i, map);
4853        }
4854}
4855
4856static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4857{
4858        struct cnic_local *cp = dev->cnic_priv;
4859        struct cnic_eth_dev *ethdev = cp->ethdev;
4860        int err = 0;
4861
4862        tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4863                     (unsigned long) dev);
4864        if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4865                err = cnic_request_irq(dev);
4866
4867        return err;
4868}
4869
4870static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4871                                                u16 sb_id, u8 sb_index,
4872                                                u8 disable)
4873{
4874        struct bnx2x *bp = netdev_priv(dev->netdev);
4875
4876        u32 addr = BAR_CSTRORM_INTMEM +
4877                        CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4878                        offsetof(struct hc_status_block_data_e1x, index_data) +
4879                        sizeof(struct hc_index_data)*sb_index +
4880                        offsetof(struct hc_index_data, flags);
4881        u16 flags = CNIC_RD16(dev, addr);
4882        /* clear and set */
4883        flags &= ~HC_INDEX_DATA_HC_ENABLED;
4884        flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4885                  HC_INDEX_DATA_HC_ENABLED);
4886        CNIC_WR16(dev, addr, flags);
4887}
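/*
 * cnic_storm_memset_hc_disable() rewrites only the HC_ENABLED bit of the
 * per-index flags word: the bit is first cleared and then set from the
 * inverted "disable" argument.  Assuming HC_INDEX_DATA_HC_ENABLED is the
 * single bit at HC_INDEX_DATA_HC_ENABLED_SHIFT, disable == 0 sets the bit
 * (host coalescing enabled) and disable == 1 leaves it clear.
 */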
4888
4889static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4890{
4891        struct cnic_local *cp = dev->cnic_priv;
4892        struct bnx2x *bp = netdev_priv(dev->netdev);
4893        u8 sb_id = cp->status_blk_num;
4894
4895        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4896                        CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4897                        offsetof(struct hc_status_block_data_e1x, index_data) +
4898                        sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4899                        offsetof(struct hc_index_data, timeout), 64 / 4);
4900        cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4901}
4902
4903static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4904{
4905}
4906
4907static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4908                                    struct client_init_ramrod_data *data)
4909{
4910        struct cnic_local *cp = dev->cnic_priv;
4911        struct bnx2x *bp = netdev_priv(dev->netdev);
4912        struct cnic_uio_dev *udev = cp->udev;
4913        union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4914        dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4915        struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4916        int i;
4917        u32 cli = cp->ethdev->iscsi_l2_client_id;
4918        u32 val;
4919
4920        memset(txbd, 0, CNIC_PAGE_SIZE);
4921
4922        buf_map = udev->l2_buf_map;
4923        for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4924                struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4925                struct eth_tx_parse_bd_e1x *pbd_e1x =
4926                        &((txbd + 1)->parse_bd_e1x);
4927                struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
4928                struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4929
4930                start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4931                start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4932                reg_bd->addr_hi = start_bd->addr_hi;
4933                reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4934                start_bd->nbytes = cpu_to_le16(0x10);
4935                start_bd->nbd = cpu_to_le16(3);
4936                start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4937                start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
4938                start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4939
4940                if (BNX2X_CHIP_IS_E2_PLUS(bp))
4941                        pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4942                                ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4943                else
4944                        pbd_e1x->global_data = (UNICAST_ADDRESS <<
4945                                ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4946        }
4947
4948        val = (u64) ring_map >> 32;
4949        txbd->next_bd.addr_hi = cpu_to_le32(val);
4950
4951        data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4952
4953        val = (u64) ring_map & 0xffffffff;
4954        txbd->next_bd.addr_lo = cpu_to_le32(val);
4955
4956        data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4957
4958        /* Other ramrod params */
4959        data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4960        data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4961
4962        /* reset xstorm per client statistics */
4963        if (cli < MAX_STAT_COUNTER_ID) {
4964                data->general.statistics_zero_flg = 1;
4965                data->general.statistics_en_flg = 1;
4966                data->general.statistics_counter_id = cli;
4967        }
4968
4969        cp->tx_cons_ptr =
4970                &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4971}
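/*
 * The bnx2x TX ring above is built in groups of three BDs per slot: a start
 * BD covering the first 0x10 bytes of the UIO buffer, a parse BD (the E2+
 * or E1x variant depending on the chip) and a regular BD addressing the
 * remainder at buf_map + 0x10, with nbd set to 3.  The last descriptor slot
 * holds the next-BD pointer, chaining the ring back to its own base page.
 */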
4972
4973static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4974                                    struct client_init_ramrod_data *data)
4975{
4976        struct cnic_local *cp = dev->cnic_priv;
4977        struct bnx2x *bp = netdev_priv(dev->netdev);
4978        struct cnic_uio_dev *udev = cp->udev;
4979        struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4980                                CNIC_PAGE_SIZE);
4981        struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4982                                (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4983        struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4984        int i;
4985        u32 cli = cp->ethdev->iscsi_l2_client_id;
4986        int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
4987        u32 val;
4988        dma_addr_t ring_map = udev->l2_ring_map;
4989
4990        /* General data */
4991        data->general.client_id = cli;
4992        data->general.activate_flg = 1;
4993        data->general.sp_client_id = cli;
4994        data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4995        data->general.func_id = bp->pfid;
4996
4997        for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4998                dma_addr_t buf_map;
4999                int n = (i % cp->l2_rx_ring_size) + 1;
5000
5001                buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
5002                rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
5003                rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5004        }
5005
5006        val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5007        rxbd->addr_hi = cpu_to_le32(val);
5008        data->rx.bd_page_base.hi = cpu_to_le32(val);
5009
5010        val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5011        rxbd->addr_lo = cpu_to_le32(val);
5012        data->rx.bd_page_base.lo = cpu_to_le32(val);
5013
5014        rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5015        val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5016        rxcqe->addr_hi = cpu_to_le32(val);
5017        data->rx.cqe_page_base.hi = cpu_to_le32(val);
5018
5019        val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5020        rxcqe->addr_lo = cpu_to_le32(val);
5021        data->rx.cqe_page_base.lo = cpu_to_le32(val);
5022
5023        /* Other ramrod params */
5024        data->rx.client_qzone_id = cl_qzone_id;
5025        data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
5026        data->rx.status_block_id = BNX2X_DEF_SB_ID;
5027
5028        data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
5029
5030        data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
5031        data->rx.outer_vlan_removal_enable_flg = 1;
5032        data->rx.silent_vlan_removal_flg = 1;
5033        data->rx.silent_vlan_value = 0;
5034        data->rx.silent_vlan_mask = 0xffff;
5035
5036        cp->rx_cons_ptr =
5037                &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
5038        cp->rx_cons = *cp->rx_cons_ptr;
5039}
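/*
 * RX setup notes: the UIO ring memory is laid out as one page of TX BDs,
 * one page of RX BDs and one page of RCQ entries (udev->l2_ring + 0, +1 and
 * +2 pages respectively), and the RX buffers are recycled the same way as
 * in the bnx2 path above.  The advertised MTU is l2_single_buf_size - 14,
 * presumably leaving room for the 14-byte Ethernet header in each buffer.
 */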
5040
5041static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5042{
5043        struct cnic_local *cp = dev->cnic_priv;
5044        struct bnx2x *bp = netdev_priv(dev->netdev);
5045        u32 pfid = bp->pfid;
5046
5047        cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5048                           CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5049        cp->kcq1.sw_prod_idx = 0;
5050
5051        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5052                struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5053
5054                cp->kcq1.hw_prod_idx_ptr =
5055                        &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5056                cp->kcq1.status_idx_ptr =
5057                        &sb->sb.running_index[SM_RX_ID];
5058        } else {
5059                struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5060
5061                cp->kcq1.hw_prod_idx_ptr =
5062                        &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5063                cp->kcq1.status_idx_ptr =
5064                        &sb->sb.running_index[SM_RX_ID];
5065        }
5066
5067        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5068                struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5069
5070                cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5071                                        USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5072                cp->kcq2.sw_prod_idx = 0;
5073                cp->kcq2.hw_prod_idx_ptr =
5074                        &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5075                cp->kcq2.status_idx_ptr =
5076                        &sb->sb.running_index[SM_RX_ID];
5077        }
5078}
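/*
 * cnic_init_bnx2x_kcq() points kcq1 at the iSCSI event queue producer in
 * CSTORM internal memory and, on E2 and later chips, kcq2 at the FCoE event
 * queue producer in USTORM; both queues read their hardware producer and
 * status indices out of the chip's status block.
 */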
5079
5080static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5081{
5082        struct cnic_local *cp = dev->cnic_priv;
5083        struct bnx2x *bp = netdev_priv(dev->netdev);
5084        struct cnic_eth_dev *ethdev = cp->ethdev;
5085        int func, ret;
5086        u32 pfid;
5087
5088        dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5089        cp->func = bp->pf_num;
5090
5091        func = CNIC_FUNC(cp);
5092        pfid = bp->pfid;
5093
5094        ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5095                               cp->iscsi_start_cid, 0);
5096
5097        if (ret)
5098                return -ENOMEM;
5099
5100        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5101                ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5102                                        cp->fcoe_start_cid, 0);
5103
5104                if (ret)
5105                        return -ENOMEM;
5106        }
5107
5108        cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5109
5110        cnic_init_bnx2x_kcq(dev);
5111
5112        /* Only 1 EQ */
5113        CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5114        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5115                CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5116        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5117                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5118                cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5119        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5120                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5121                (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5122        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5123                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5124                cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5125        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5126                CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5127                (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5128        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5129                CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5130        CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5131                CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5132        CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5133                CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5134                HC_INDEX_ISCSI_EQ_CONS);
5135
5136        CNIC_WR(dev, BAR_USTRORM_INTMEM +
5137                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5138                cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5139        CNIC_WR(dev, BAR_USTRORM_INTMEM +
5140                USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5141                (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5142
5143        CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5144                TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5145
5146        cnic_setup_bnx2x_context(dev);
5147
5148        ret = cnic_init_bnx2x_irq(dev);
5149        if (ret)
5150                return ret;
5151
5152        ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
5153        return 0;
5154}
5155
5156static void cnic_init_rings(struct cnic_dev *dev)
5157{
5158        struct cnic_local *cp = dev->cnic_priv;
5159        struct bnx2x *bp = netdev_priv(dev->netdev);
5160        struct cnic_uio_dev *udev = cp->udev;
5161
5162        if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5163                return;
5164
5165        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5166                cnic_init_bnx2_tx_ring(dev);
5167                cnic_init_bnx2_rx_ring(dev);
5168                set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5169        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5170                u32 cli = cp->ethdev->iscsi_l2_client_id;
5171                u32 cid = cp->ethdev->iscsi_l2_cid;
5172                u32 cl_qzone_id;
5173                struct client_init_ramrod_data *data;
5174                union l5cm_specific_data l5_data;
5175                struct ustorm_eth_rx_producers rx_prods = {0};
5176                u32 off, i, *cid_ptr;
5177
5178                rx_prods.bd_prod = 0;
5179                rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5180                barrier();
5181
5182                cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
5183
5184                off = BAR_USTRORM_INTMEM +
5185                        (BNX2X_CHIP_IS_E2_PLUS(bp) ?
5186                         USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5187                         USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
5188
5189                for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5190                        CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5191
5192                set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5193
5194                data = udev->l2_buf;
5195                cid_ptr = udev->l2_buf + 12;
5196
5197                memset(data, 0, sizeof(*data));
5198
5199                cnic_init_bnx2x_tx_ring(dev, data);
5200                cnic_init_bnx2x_rx_ring(dev, data);
5201
5202                l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5203                l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5204
5205                set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5206
5207                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5208                        cid, ETH_CONNECTION_TYPE, &l5_data);
5209
5210                i = 0;
5211                while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5212                       ++i < 10)
5213                        msleep(1);
5214
5215                if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5216                        netdev_err(dev->netdev,
5217                                "iSCSI CLIENT_SETUP did not complete\n");
5218                cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5219                cnic_ring_ctl(dev, cid, cli, 1);
5220                *cid_ptr = cid >> 4;
5221                *(cid_ptr + 1) = cid * bp->db_size;
5222                *(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
5223        }
5224}
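/*
 * After submitting the CLIENT_SETUP ramrod, cnic_init_rings() polls the
 * CNIC_LCL_FL_L2_WAIT flag for up to roughly 10 ms (ten 1 ms sleeps) and
 * only warns if the completion never arrives.  The three 32-bit values
 * stored at udev->l2_buf + 12 (cid >> 4, cid * bp->db_size and
 * UIO_USE_TX_DOORBELL) are presumably picked up by the userspace UIO
 * consumer so it can ring the TX doorbell for this connection.
 */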
5225
5226static void cnic_shutdown_rings(struct cnic_dev *dev)
5227{
5228        struct cnic_local *cp = dev->cnic_priv;
5229        struct cnic_uio_dev *udev = cp->udev;
5230        void *rx_ring;
5231
5232        if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5233                return;
5234
5235        if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5236                cnic_shutdown_bnx2_rx_ring(dev);
5237        } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5238                u32 cli = cp->ethdev->iscsi_l2_client_id;
5239                u32 cid = cp->ethdev->iscsi_l2_cid;
5240                union l5cm_specific_data l5_data;
5241                int i;
5242
5243                cnic_ring_ctl(dev, cid, cli, 0);
5244
5245                set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5246
5247                l5_data.phy_address.lo = cli;
5248                l5_data.phy_address.hi = 0;
5249                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5250                        cid, ETH_CONNECTION_TYPE, &l5_data);
5251                i = 0;
5252                while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5253                       ++i < 10)
5254                        msleep(1);
5255
5256                if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5257                        netdev_err(dev->netdev,
5258                                "iSCSI CLIENT_HALT did not complete\n");
5259                cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5260
5261                memset(&l5_data, 0, sizeof(l5_data));
5262                cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5263                        cid, NONE_CONNECTION_TYPE, &l5_data);
5264                msleep(10);
5265        }
5266        clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5267        rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5268        memset(rx_ring, 0, CNIC_PAGE_SIZE);
5269}
5270
5271static int cnic_register_netdev(struct cnic_dev *dev)
5272{
5273        struct cnic_local *cp = dev->cnic_priv;
5274        struct cnic_eth_dev *ethdev = cp->ethdev;
5275        int err;
5276
5277        if (!ethdev)
5278                return -ENODEV;
5279
5280        if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5281                return 0;
5282
5283        err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5284        if (err)
5285                netdev_err(dev->netdev, "register_cnic failed\n");
5286
5287        /* Read iSCSI config again.  On some bnx2x devices, iSCSI config
5288         * can change after firmware is downloaded.
5289         */
5290        dev->max_iscsi_conn = ethdev->max_iscsi_conn;
5291        if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
5292                dev->max_iscsi_conn = 0;
5293
5294        return err;
5295}
5296
5297static void cnic_unregister_netdev(struct cnic_dev *dev)
5298{
5299        struct cnic_local *cp = dev->cnic_priv;
5300        struct cnic_eth_dev *ethdev = cp->ethdev;
5301
5302        if (!ethdev)
5303                return;
5304
5305        ethdev->drv_unregister_cnic(dev->netdev);
5306}
5307
5308static int cnic_start_hw(struct cnic_dev *dev)
5309{
5310        struct cnic_local *cp = dev->cnic_priv;
5311        struct cnic_eth_dev *ethdev = cp->ethdev;
5312        int err;
5313
5314        if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5315                return -EALREADY;
5316
5317        dev->regview = ethdev->io_base;
5318        pci_dev_get(dev->pcidev);
5319        cp->func = PCI_FUNC(dev->pcidev->devfn);
5320        cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5321        cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5322
5323        err = cp->alloc_resc(dev);
5324        if (err) {
5325                netdev_err(dev->netdev, "allocate resource failure\n");
5326                goto err1;
5327        }
5328
5329        err = cp->start_hw(dev);
5330        if (err)
5331                goto err1;
5332
5333        err = cnic_cm_open(dev);
5334        if (err)
5335                goto err1;
5336
5337        set_bit(CNIC_F_CNIC_UP, &dev->flags);
5338
5339        cp->enable_int(dev);
5340
5341        return 0;
5342
5343err1:
5344        cp->free_resc(dev);
5345        pci_dev_put(dev->pcidev);
5346        return err;
5347}
5348
5349static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5350{
5351        cnic_disable_bnx2_int_sync(dev);
5352
5353        cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5354        cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5355
5356        cnic_init_context(dev, KWQ_CID);
5357        cnic_init_context(dev, KCQ_CID);
5358
5359        cnic_setup_5709_context(dev, 0);
5360        cnic_free_irq(dev);
5361
5362        cnic_free_resc(dev);
5363}
5364
5365
5366static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5367{
5368        struct cnic_local *cp = dev->cnic_priv;
5369        struct bnx2x *bp = netdev_priv(dev->netdev);
5370        u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5371        u32 sb_id = cp->status_blk_num;
5372        u32 idx_off, syn_off;
5373
5374        cnic_free_irq(dev);
5375
5376        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5377                idx_off = offsetof(struct hc_status_block_e2, index_values) +
5378                          (hc_index * sizeof(u16));
5379
5380                syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5381        } else {
5382                idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5383                          (hc_index * sizeof(u16));
5384
5385                syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5386        }
5387        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5388        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5389                  idx_off, 0);
5390
5391        *cp->kcq1.hw_prod_idx_ptr = 0;
5392        CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5393                CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
5394        CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5395        cnic_free_resc(dev);
5396}
5397
5398static void cnic_stop_hw(struct cnic_dev *dev)
5399{
5400        if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5401                struct cnic_local *cp = dev->cnic_priv;
5402                int i = 0;
5403
5404                /* Need to wait for the ring shutdown event to complete
5405                 * before clearing the CNIC_UP flag.
5406                 */
5407                while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
5408                        msleep(100);
5409                        i++;
5410                }
5411                cnic_shutdown_rings(dev);
5412                cp->stop_cm(dev);
5413                cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
5414                clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5415                RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5416                synchronize_rcu();
5417                cnic_cm_shutdown(dev);
5418                cp->stop_hw(dev);
5419                pci_dev_put(dev->pcidev);
5420        }
5421}
5422
5423static void cnic_free_dev(struct cnic_dev *dev)
5424{
5425        int i = 0;
5426
5427        while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5428                msleep(100);
5429                i++;
5430        }
5431        if (atomic_read(&dev->ref_count) != 0)
5432                netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5433
5434        netdev_info(dev->netdev, "Removed CNIC device\n");
5435        dev_put(dev->netdev);
5436        kfree(dev);
5437}
5438
5439static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5440                                       struct pci_dev *pdev)
5441{
5442        struct cnic_dev *cdev;
5443        struct cnic_local *cp;
5444        int alloc_size;
5445
5446        alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5447
5448        cdev = kzalloc(alloc_size, GFP_KERNEL);
5449        if (cdev == NULL)
5450                return NULL;
5451
5452        cdev->netdev = dev;
5453        cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5454        cdev->register_device = cnic_register_device;
5455        cdev->unregister_device = cnic_unregister_device;
5456        cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5457
5458        cp = cdev->cnic_priv;
5459        cp->dev = cdev;
5460        cp->l2_single_buf_size = 0x400;
5461        cp->l2_rx_ring_size = 3;
5462
5463        spin_lock_init(&cp->cnic_ulp_lock);
5464
5465        netdev_info(dev, "Added CNIC device\n");
5466
5467        return cdev;
5468}
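/*
 * cnic_alloc_dev() makes a single allocation holding the cnic_dev followed
 * immediately by its cnic_local private area, so cnic_priv simply points
 * just past the cnic_dev structure; both are freed together in
 * cnic_free_dev().
 */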
5469
5470static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5471{
5472        struct pci_dev *pdev;
5473        struct cnic_dev *cdev;
5474        struct cnic_local *cp;
5475        struct bnx2 *bp = netdev_priv(dev);
5476        struct cnic_eth_dev *ethdev = NULL;
5477
5478        if (bp->cnic_probe)
5479                ethdev = (bp->cnic_probe)(dev);
5480
5481        if (!ethdev)
5482                return NULL;
5483
5484        pdev = ethdev->pdev;
5485        if (!pdev)
5486                return NULL;
5487
5488        dev_hold(dev);
5489        pci_dev_get(pdev);
5490        if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5491             pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5492            (pdev->revision < 0x10)) {
5493                pci_dev_put(pdev);
5494                goto cnic_err;
5495        }
5496        pci_dev_put(pdev);
5497
5498        cdev = cnic_alloc_dev(dev, pdev);
5499        if (cdev == NULL)
5500                goto cnic_err;
5501
5502        set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5503        cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5504
5505        cp = cdev->cnic_priv;
5506        cp->ethdev = ethdev;
5507        cdev->pcidev = pdev;
5508        cp->chip_id = ethdev->chip_id;
5509
5510        cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5511
5512        cp->cnic_ops = &cnic_bnx2_ops;
5513        cp->start_hw = cnic_start_bnx2_hw;
5514        cp->stop_hw = cnic_stop_bnx2_hw;
5515        cp->setup_pgtbl = cnic_setup_page_tbl;
5516        cp->alloc_resc = cnic_alloc_bnx2_resc;
5517        cp->free_resc = cnic_free_resc;
5518        cp->start_cm = cnic_cm_init_bnx2_hw;
5519        cp->stop_cm = cnic_cm_stop_bnx2_hw;
5520        cp->enable_int = cnic_enable_bnx2_int;
5521        cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5522        cp->close_conn = cnic_close_bnx2_conn;
5523        return cdev;
5524
5525cnic_err:
5526        dev_put(dev);
5527        return NULL;
5528}
5529
5530static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5531{
5532        struct pci_dev *pdev;
5533        struct cnic_dev *cdev;
5534        struct cnic_local *cp;
5535        struct bnx2x *bp = netdev_priv(dev);
5536        struct cnic_eth_dev *ethdev = NULL;
5537
5538        if (bp->cnic_probe)
5539                ethdev = bp->cnic_probe(dev);
5540
5541        if (!ethdev)
5542                return NULL;
5543
5544        pdev = ethdev->pdev;
5545        if (!pdev)
5546                return NULL;
5547
5548        dev_hold(dev);
5549        cdev = cnic_alloc_dev(dev, pdev);
5550        if (cdev == NULL) {
5551                dev_put(dev);
5552                return NULL;
5553        }
5554
5555        set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5556        cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5557
5558        cp = cdev->cnic_priv;
5559        cp->ethdev = ethdev;
5560        cdev->pcidev = pdev;
5561        cp->chip_id = ethdev->chip_id;
5562
5563        cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5564
5565        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5566                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5567        if (CNIC_SUPPORTS_FCOE(bp)) {
5568                cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5569                cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
5570        }
5571
5572        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5573                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5574
5575        memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
5576
5577        cp->cnic_ops = &cnic_bnx2x_ops;
5578        cp->start_hw = cnic_start_bnx2x_hw;
5579        cp->stop_hw = cnic_stop_bnx2x_hw;
5580        cp->setup_pgtbl = cnic_setup_page_tbl_le;
5581        cp->alloc_resc = cnic_alloc_bnx2x_resc;
5582        cp->free_resc = cnic_free_resc;
5583        cp->start_cm = cnic_cm_init_bnx2x_hw;
5584        cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5585        cp->enable_int = cnic_enable_bnx2x_int;
5586        cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5587        if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
5588                cp->ack_int = cnic_ack_bnx2x_e2_msix;
5589                cp->arm_int = cnic_arm_bnx2x_e2_msix;
5590        } else {
5591                cp->ack_int = cnic_ack_bnx2x_msix;
5592                cp->arm_int = cnic_arm_bnx2x_msix;
5593        }
5594        cp->close_conn = cnic_close_bnx2x_conn;
5595        return cdev;
5596}
5597
5598static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5599{
5600        struct ethtool_drvinfo drvinfo;
5601        struct cnic_dev *cdev = NULL;
5602
5603        if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5604                memset(&drvinfo, 0, sizeof(drvinfo));
5605                dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5606
5607                if (!strcmp(drvinfo.driver, "bnx2"))
5608                        cdev = init_bnx2_cnic(dev);
5609                if (!strcmp(drvinfo.driver, "bnx2x"))
5610                        cdev = init_bnx2x_cnic(dev);
5611                if (cdev) {
5612                        write_lock(&cnic_dev_lock);
5613                        list_add(&cdev->list, &cnic_dev_list);
5614                        write_unlock(&cnic_dev_lock);
5615                }
5616        }
5617        return cdev;
5618}
5619
5620static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5621                              u16 vlan_id)
5622{
5623        int if_type;
5624
5625        for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5626                struct cnic_ulp_ops *ulp_ops;
5627                void *ctx;
5628
5629                mutex_lock(&cnic_lock);
5630                ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5631                                                lockdep_is_held(&cnic_lock));
5632                if (!ulp_ops || !ulp_ops->indicate_netevent) {
5633                        mutex_unlock(&cnic_lock);
5634                        continue;
5635                }
5636
5637                ctx = cp->ulp_handle[if_type];
5638
5639                set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5640                mutex_unlock(&cnic_lock);
5641
5642                ulp_ops->indicate_netevent(ctx, event, vlan_id);
5643
5644                clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5645        }
5646}
5647
5648/* netdev event handler */
5649static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5650                                                         void *ptr)
5651{
5652        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
5653        struct cnic_dev *dev;
5654        int new_dev = 0;
5655
5656        dev = cnic_from_netdev(netdev);
5657
5658        if (!dev && event == NETDEV_REGISTER) {
5659                /* Check for the hot-plug device */
5660                dev = is_cnic_dev(netdev);
5661                if (dev) {
5662                        new_dev = 1;
5663                        cnic_hold(dev);
5664                }
5665        }
5666        if (dev) {
5667                struct cnic_local *cp = dev->cnic_priv;
5668
5669                if (new_dev)
5670                        cnic_ulp_init(dev);
5671                else if (event == NETDEV_UNREGISTER)
5672                        cnic_ulp_exit(dev);
5673
5674                if (event == NETDEV_UP) {
5675                        if (cnic_register_netdev(dev) != 0) {
5676                                cnic_put(dev);
5677                                goto done;
5678                        }
5679                        if (!cnic_start_hw(dev))
5680                                cnic_ulp_start(dev);
5681                }
5682
5683                cnic_rcv_netevent(cp, event, 0);
5684
5685                if (event == NETDEV_GOING_DOWN) {
5686                        cnic_ulp_stop(dev);
5687                        cnic_stop_hw(dev);
5688                        cnic_unregister_netdev(dev);
5689                } else if (event == NETDEV_UNREGISTER) {
5690                        write_lock(&cnic_dev_lock);
5691                        list_del_init(&dev->list);
5692                        write_unlock(&cnic_dev_lock);
5693
5694                        cnic_put(dev);
5695                        cnic_free_dev(dev);
5696                        goto done;
5697                }
5698                cnic_put(dev);
5699        } else {
5700                struct net_device *realdev;
5701                u16 vid;
5702
5703                vid = cnic_get_vlan(netdev, &realdev);
5704                if (realdev) {
5705                        dev = cnic_from_netdev(realdev);
5706                        if (dev) {
5707                                vid |= VLAN_TAG_PRESENT;
5708                                cnic_rcv_netevent(dev->cnic_priv, event, vid);
5709                                cnic_put(dev);
5710                        }
5711                }
5712        }
5713done:
5714        return NOTIFY_DONE;
5715}
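/*
 * The notifier above drives the device life cycle: NETDEV_REGISTER probes a
 * freshly hot-plugged bnx2/bnx2x netdev, NETDEV_UP registers with the
 * ethernet driver and starts the hardware and ULPs, NETDEV_GOING_DOWN stops
 * them again, and NETDEV_UNREGISTER removes the cnic device from the list
 * and frees it.  Events on a VLAN device are forwarded to the underlying
 * real device with VLAN_TAG_PRESENT or'ed into the vlan id.
 */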
5716
5717static struct notifier_block cnic_netdev_notifier = {
5718        .notifier_call = cnic_netdev_event
5719};
5720
5721static void cnic_release(void)
5722{
5723        struct cnic_uio_dev *udev;
5724
5725        while (!list_empty(&cnic_udev_list)) {
5726                udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5727                                  list);
5728                cnic_free_uio(udev);
5729        }
5730}
5731
5732static int __init cnic_init(void)
5733{
5734        int rc = 0;
5735
5736        pr_info("%s", version);
5737
5738        rc = register_netdevice_notifier(&cnic_netdev_notifier);
5739        if (rc) {
5740                cnic_release();
5741                return rc;
5742        }
5743
5744        cnic_wq = create_singlethread_workqueue("cnic_wq");
5745        if (!cnic_wq) {
5746                cnic_release();
5747                unregister_netdevice_notifier(&cnic_netdev_notifier);
5748                return -ENOMEM;
5749        }
5750
5751        return 0;
5752}
5753
5754static void __exit cnic_exit(void)
5755{
5756        unregister_netdevice_notifier(&cnic_netdev_notifier);
5757        cnic_release();
5758        destroy_workqueue(cnic_wq);
5759}
5760
5761module_init(cnic_init);
5762module_exit(cnic_exit);
5763