linux/drivers/scsi/qedf/qedf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
#include "qedf_dbg.h"
#include <uapi/linux/pci_regs.h>

const struct qed_fcoe_ops *qed_ops;

static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);

/*
 * Driver module parameters.
 */
static unsigned int qedf_dev_loss_tmo = 60;
module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
        "remote ports (default 60)");

uint qedf_debug = QEDF_LOG_INFO;
module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
        " mask");

static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, uint, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
        "before giving up (default 60)");

static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, uint, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if the FIP VLAN request "
        "fails (default 1002).");

static int qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
        " traffic (value between 0 and 7, default 3).");

uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
        "(default off)");

static uint qedf_queue_depth;
module_param_named(queue_depth, qedf_queue_depth, uint, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
        "by the qedf driver. Default is 0 (use OS default).");

uint qedf_io_tracing;
module_param_named(io_tracing, qedf_io_tracing, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
        "into the trace buffer (default off).");

static uint qedf_max_lun = MAX_FIBRE_LUNS;
module_param_named(max_lun, qedf_max_lun, uint, S_IRUGO);
MODULE_PARM_DESC(max_lun, " Sets the maximum number of LUNs per target that "
        "the driver supports (default 0xffffffff)");

uint qedf_link_down_tmo;
module_param_named(link_down_tmo, qedf_link_down_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
        "link is down by N seconds.");

bool qedf_retry_delay;
module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU "
        "retry delay (default off).");

static bool qedf_dcbx_no_wait;
module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
        "sending FIP VLAN requests on link up (Default: off).");

static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " Bit flags to control verbose printk output "
        "from the qed module during probe.");

static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to the qed "
        "module during probe (0-3, 0 being most verbose).");

static bool qedf_enable_recovery = true;
module_param_named(enable_recovery, qedf_enable_recovery,
                bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
                "interface level errors. 0 = Disabled, 1 = Enabled (Default: 1).");

struct workqueue_struct *qedf_io_wq;

static struct fcoe_percpu_s qedf_global;
static DEFINE_SPINLOCK(qedf_global_lock);

static struct kmem_cache *qedf_io_work_cache;

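/*
 * Combine the 12-bit VLAN ID with the stored 802.1p priority to form the
 * VLAN TCI used for FIP and FCoE traffic. For example, with prio 3 and the
 * fallback VLAN 1002 (0x3ea), this stores (3 << 13) | 0x3ea = 0x63ea.
 */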
void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
        int vlan_id_tmp = 0;

        vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
        qedf->vlan_id = vlan_id_tmp;
        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                  "Setting vlan_id=0x%04x prio=%d.\n",
                  vlan_id_tmp, qedf->prio);
}

/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
        while (qedf->fipvlan_retries--) {
                /* This is to catch if link goes down during fipvlan retries */
                if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
                        QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
                        return false;
                }

                if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
                        QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
                        return false;
                }

                if (qedf->vlan_id > 0) {
                        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                                  "vlan = 0x%x already set, calling ctlr_link_up.\n",
                                  qedf->vlan_id);
                        if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
                                fcoe_ctlr_link_up(&qedf->ctlr);
                        return true;
                }

                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                           "Retry %d.\n", qedf->fipvlan_retries);
                init_completion(&qedf->fipvlan_compl);
                qedf_fcoe_send_vlan_req(qedf);
                wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
        }

        return false;
}

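/*
 * Delayed-work handler for link state changes. On link up it tries to
 * discover the FCoE VLAN via FIP; if no response arrives it falls back to
 * the fallback VLAN before declaring the link up to libfcoe. On link down
 * it informs libfcoe and waits for all offloaded sessions to upload.
 */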
static void qedf_handle_link_update(struct work_struct *work)
{
        struct qedf_ctx *qedf =
            container_of(work, struct qedf_ctx, link_update.work);
        int rc;

        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
                  atomic_read(&qedf->link_state));

        if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
                rc = qedf_initiate_fipvlan_req(qedf);
                if (rc)
                        return;

                if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                                  "Link is down, resetting vlan_id.\n");
                        qedf->vlan_id = 0;
                        return;
                }

                /*
                 * If we get here then we never received a response to our
                 * FIP VLAN request, so set the vlan_id to the default and
                 * tell FCoE that the link is up.
                 */
                QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
                           "response, falling back to default VLAN %d.\n",
                           qedf_fallback_vlan);
                qedf_set_vlan_id(qedf, qedf_fallback_vlan);

                /*
                 * Zero out data_src_addr so we'll update it with the new
                 * lport port_id.
                 */
                eth_zero_addr(qedf->data_src_addr);
                fcoe_ctlr_link_up(&qedf->ctlr);
        } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
                /*
                 * If we hit here and link_down_tmo_valid is still 1 it means
                 * that link_down_tmo timed out, so set it to 0 to make sure
                 * any other readers have accurate state.
                 */
                atomic_set(&qedf->link_down_tmo_valid, 0);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                    "Calling fcoe_ctlr_link_down().\n");
                fcoe_ctlr_link_down(&qedf->ctlr);
                if (qedf_wait_for_upload(qedf) == false)
                        QEDF_ERR(&qedf->dbg_ctx,
                                 "Could not upload all sessions.\n");
                /* Reset the number of FIP VLAN retries */
                qedf->fipvlan_retries = qedf_fipvlan_retries;
        }
}

#define QEDF_FCOE_MAC_METHOD_GRANTED_MAC                1
#define QEDF_FCOE_MAC_METHOD_FCF_MAP                    2
#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC               3
static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
{
        u8 *granted_mac;
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        u8 fc_map[3];
        int method = 0;

        /* Get granted MAC address from FIP FLOGI payload */
        granted_mac = fr_cb(fp)->granted_mac;

        /*
         * We set the source MAC for FCoE traffic based on the Granted MAC
         * address from the switch.
         *
         * If granted_mac is non-zero, we use that.
         * If granted_mac is zeroed out, we create the FCoE MAC based on
         * the sel_fcf->fc_map and the d_id of the FLOGI frame.
         * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
         * d_id of the FLOGI frame.
         */
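        /*
         * Worked example of the fc_map case below: with an fc_map of
         * 0x0efc00 and a FLOGI d_id of 0xfffffe, the derived FCoE MAC is
         * 0e:fc:00:ff:ff:fe (fc_map in the upper three bytes, d_id in the
         * lower three).
         */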
        if (!is_zero_ether_addr(granted_mac)) {
                ether_addr_copy(qedf->data_src_addr, granted_mac);
                method = QEDF_FCOE_MAC_METHOD_GRANTED_MAC;
        } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
                hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
                qedf->data_src_addr[0] = fc_map[0];
                qedf->data_src_addr[1] = fc_map[1];
                qedf->data_src_addr[2] = fc_map[2];
                qedf->data_src_addr[3] = fh->fh_d_id[0];
                qedf->data_src_addr[4] = fh->fh_d_id[1];
                qedf->data_src_addr[5] = fh->fh_d_id[2];
                method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
        } else {
                fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
                method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
            "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
}

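/*
 * FLOGI response handler. Counts rejects, captures the granted MAC on an
 * accept, and completes flogi_compl before handing the frame to libfc.
 */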
static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
        void *arg)
{
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        struct qedf_ctx *qedf = lport_priv(lport);

        if (!qedf) {
                QEDF_ERR(NULL, "qedf is NULL.\n");
                return;
        }

        /*
         * If ERR_PTR is set then don't try to stat anything as it will cause
         * a crash when we access fp.
         */
        if (IS_ERR(fp)) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "fp has IS_ERR() set.\n");
                goto skip_stat;
        }

        /* Log stats for FLOGI reject */
        if (fc_frame_payload_op(fp) == ELS_LS_RJT)
                qedf->flogi_failed++;
        else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
                /* Set the source MAC we will use for FCoE traffic */
                qedf_set_data_src_addr(qedf, fp);
                qedf->flogi_pending = 0;
        }

        /* Complete flogi_compl so we can proceed to sending ADISCs */
        complete(&qedf->flogi_compl);

skip_stat:
        /* Report response to libfc */
        fc_lport_flogi_resp(seq, fp, lport);
}

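/*
 * Wrapper around fc_elsct_send() that intercepts FLOGI requests so the
 * driver can count them and substitute its own response handler. If too
 * many FLOGIs are already pending, the soft-reset (stag) work item is
 * scheduled instead of sending another one.
 */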
static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
        struct fc_frame *fp, unsigned int op,
        void (*resp)(struct fc_seq *,
        struct fc_frame *,
        void *),
        void *arg, u32 timeout)
{
        struct qedf_ctx *qedf = lport_priv(lport);

        /*
         * Intercept FLOGI for statistic purposes. Note we use the resp
         * callback to tell if this is really a FLOGI.
         */
        if (resp == fc_lport_flogi_resp) {
                qedf->flogi_cnt++;
                if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
                        schedule_delayed_work(&qedf->stag_work, 2);
                        return NULL;
                }
                qedf->flogi_pending++;
                return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
                    arg, timeout);
        }

        return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}

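/*
 * Manually send a FLOGI through libfc. Used during link recovery to
 * re-establish the session with the switch without a full fabric login
 * cycle.
 */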
int qedf_send_flogi(struct qedf_ctx *qedf)
{
        struct fc_lport *lport;
        struct fc_frame *fp;

        lport = qedf->lport;

        if (!lport->tt.elsct_send) {
                QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
                return -EINVAL;
        }

        fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
                return -ENOMEM;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending FLOGI to reestablish session with switch.\n");

        /* Initialize the completion before the response can possibly fire */
        init_completion(&qedf->flogi_compl);

        lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
            ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);

        return 0;
}

/*
 * This function is called if link_down_tmo is in use.  If we get a link up and
 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
 * sessions with targets.  Otherwise, just call fcoe_ctlr_link_up().
 */
static void qedf_link_recovery(struct work_struct *work)
{
        struct qedf_ctx *qedf =
            container_of(work, struct qedf_ctx, link_recovery.work);
        struct fc_lport *lport = qedf->lport;
        struct fc_rport_priv *rdata;
        bool rc;
        int retries = 30;
        int rval, i;
        struct list_head rdata_login_list;

        INIT_LIST_HEAD(&rdata_login_list);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
            "Link down tmo did not expire.\n");

        /*
         * Essentially reset the fcoe_ctlr here without affecting the state
         * of the libfc structs.
         */
        qedf->ctlr.state = FIP_ST_LINK_WAIT;
        fcoe_ctlr_link_down(&qedf->ctlr);

        /*
         * Bring the link up before we send the fipvlan request so libfcoe
         * can select a new fcf in parallel.
         */
        fcoe_ctlr_link_up(&qedf->ctlr);

        /* Since the link went down and came back up, re-verify the VLAN */
        qedf->fipvlan_retries = qedf_fipvlan_retries;
        rc = qedf_initiate_fipvlan_req(qedf);
        /* If getting the VLAN fails, set the VLAN to the fallback one */
        if (!rc)
                qedf_set_vlan_id(qedf, qedf_fallback_vlan);

        /*
         * We need to wait for an FCF to be selected after the
         * fcoe_ctlr_link_up call, otherwise the FLOGI will be rejected.
         */
        while (retries > 0) {
                if (qedf->ctlr.sel_fcf) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FCF reselected, proceeding with FLOGI.\n");
                        break;
                }
                msleep(500);
                retries--;
        }

        if (retries < 1) {
                QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
                    "FCF selection.\n");
                return;
        }

        rval = qedf_send_flogi(qedf);
        if (rval)
                return;

        /* Wait for FLOGI completion before proceeding with sending ADISCs */
        i = wait_for_completion_timeout(&qedf->flogi_compl,
            qedf->lport->r_a_tov);
        if (i == 0) {
                QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
                return;
        }

        /*
         * Call fc_rport_login() which will cause libfc to send an
         * ADISC since the rport is in state ready.
         */
        mutex_lock(&lport->disc.disc_mutex);
        list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
                if (kref_get_unless_zero(&rdata->kref)) {
                        fc_rport_login(rdata);
                        kref_put(&rdata->kref, fc_rport_destroy);
                }
        }
        mutex_unlock(&lport->disc.disc_mutex);
}

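/*
 * Translate the qed link speed/capability report into the FC transport's
 * FC_PORTSPEED_* values for fc_host, matching each group of supported
 * ethtool link modes to the corresponding FC port speed.
 */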
static void qedf_update_link_speed(struct qedf_ctx *qedf,
        struct qed_link_output *link)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
        struct fc_lport *lport = qedf->lport;

        lport->link_speed = FC_PORTSPEED_UNKNOWN;
        lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;

        /* Set fc_host link speed */
        switch (link->speed) {
        case 10000:
                lport->link_speed = FC_PORTSPEED_10GBIT;
                break;
        case 25000:
                lport->link_speed = FC_PORTSPEED_25GBIT;
                break;
        case 40000:
                lport->link_speed = FC_PORTSPEED_40GBIT;
                break;
        case 50000:
                lport->link_speed = FC_PORTSPEED_50GBIT;
                break;
        case 100000:
                lport->link_speed = FC_PORTSPEED_100GBIT;
                break;
        case 20000:
                lport->link_speed = FC_PORTSPEED_20GBIT;
                break;
        default:
                lport->link_speed = FC_PORTSPEED_UNKNOWN;
                break;
        }

        /*
         * Set supported link speed by querying the supported
         * capabilities of the link.
         */

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 10000baseT_Full);
        phylink_set(sup_caps, 10000baseKX4_Full);
        phylink_set(sup_caps, 10000baseR_FEC);
        phylink_set(sup_caps, 10000baseCR_Full);
        phylink_set(sup_caps, 10000baseSR_Full);
        phylink_set(sup_caps, 10000baseLR_Full);
        phylink_set(sup_caps, 10000baseLRM_Full);
        phylink_set(sup_caps, 10000baseKR_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 25000baseKR_Full);
        phylink_set(sup_caps, 25000baseCR_Full);
        phylink_set(sup_caps, 25000baseSR_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 40000baseLR4_Full);
        phylink_set(sup_caps, 40000baseKR4_Full);
        phylink_set(sup_caps, 40000baseCR4_Full);
        phylink_set(sup_caps, 40000baseSR4_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 50000baseKR2_Full);
        phylink_set(sup_caps, 50000baseCR2_Full);
        phylink_set(sup_caps, 50000baseSR2_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 100000baseKR4_Full);
        phylink_set(sup_caps, 100000baseSR4_Full);
        phylink_set(sup_caps, 100000baseCR4_Full);
        phylink_set(sup_caps, 100000baseLR4_ER4_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;

        phylink_zero(sup_caps);
        phylink_set(sup_caps, 20000baseKR2_Full);

        if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;

        if (lport->host && lport->host->shost_data)
                fc_host_supported_speeds(lport->host) =
                        lport->link_supported_speeds;
}

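/*
 * qed callback invoked on bandwidth changes; refreshes the advertised
 * link speed as long as the driver is not unloading and the link is up.
 */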
static void qedf_bw_update(void *dev)
{
        struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
        struct qed_link_output link;

        /* Get the latest status of the link */
        qed_ops->common->get_link(qedf->cdev, &link);

        if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Ignoring link update, driver is unloading.\n");
                return;
        }

        if (link.link_up) {
                if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
                        qedf_update_link_speed(qedf, &link);
                else
                        QEDF_ERR(&qedf->dbg_ctx,
                                 "Ignoring bw update, link is down.\n");

        } else {
                QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
        }
}

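/*
 * qed callback for physical link changes. Link up is propagated once DCBX
 * has converged (or immediately with dcbx_no_wait); link down arms the
 * optional link_down_tmo delay before libfcoe is told the link dropped.
 */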
static void qedf_link_update(void *dev, struct qed_link_output *link)
{
        struct qedf_ctx *qedf = (struct qedf_ctx *)dev;

        /*
         * Prevent race where we're removing the module and we get a link
         * update for qed.
         */
        if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Ignoring link update, driver is unloading.\n");
                return;
        }

        if (link->link_up) {
                if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
                        QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Ignoring link up event as link is already up.\n");
                        return;
                }
                QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
                    link->speed / 1000);

                /* Cancel any pending link down work */
                cancel_delayed_work(&qedf->link_update);

                atomic_set(&qedf->link_state, QEDF_LINK_UP);
                qedf_update_link_speed(qedf, link);

                if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
                    qedf_dcbx_no_wait) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                             "DCBx done.\n");
                        if (atomic_read(&qedf->link_down_tmo_valid) > 0)
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_recovery, 0);
                        else
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_update, 0);
                        atomic_set(&qedf->link_down_tmo_valid, 0);
                }

        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");

                atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
                atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
                /*
                 * Flag that we're waiting for the link to come back up before
                 * informing the fcoe layer of the event.
                 */
                if (qedf_link_down_tmo > 0) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "Starting link down tmo.\n");
                        atomic_set(&qedf->link_down_tmo_valid, 1);
                }
                qedf->vlan_id = 0;
                qedf_update_link_speed(qedf, link);
                queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
                    qedf_link_down_tmo * HZ);
        }
}

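/*
 * qed DCBX asynchronous event notification handler. Once the operational
 * DCBX parameters are valid, latch the negotiated (or overridden) FCoE
 * 802.1p priority and kick off the deferred link-up processing.
 */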
static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
{
        struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
        u8 tmp_prio;

        QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
            "prio=%d.\n", get->operational.valid, get->operational.enabled,
            get->operational.app_prio.fcoe);

        if (get->operational.enabled && get->operational.valid) {
                /* If DCBX was already negotiated on link up then just exit */
                if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "DCBX already set on link up.\n");
                        return;
                }

                atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);

                /*
                 * Set the 8021q priority in the following manner:
                 *
                 * 1. If a modparam is set, use that
                 * 2. If the value from DCBX is not between 0..7, use the
                 *    default
                 * 3. Otherwise, use the priority from the DCBX app tag
                 */
                tmp_prio = get->operational.app_prio.fcoe;
                if (qedf_default_prio > -1)
                        qedf->prio = qedf_default_prio;
                else if (tmp_prio > 7) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FIP/FCoE prio %d out of range, setting to %d.\n",
                            tmp_prio, QEDF_DEFAULT_PRIO);
                        qedf->prio = QEDF_DEFAULT_PRIO;
                } else
                        qedf->prio = tmp_prio;

                if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
                    !qedf_dcbx_no_wait) {
                        if (atomic_read(&qedf->link_down_tmo_valid) > 0)
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_recovery, 0);
                        else
                                queue_delayed_work(qedf->link_update_wq,
                                    &qedf->link_update, 0);
                        atomic_set(&qedf->link_down_tmo_valid, 0);
                }
        }
}

static u32 qedf_get_login_failures(void *cookie)
{
        struct qedf_ctx *qedf;

        qedf = (struct qedf_ctx *)cookie;
        return qedf->flogi_failed;
}

static struct qed_fcoe_cb_ops qedf_cb_ops = {
        {
                .link_update = qedf_link_update,
                .bw_update = qedf_bw_update,
                .schedule_recovery_handler = qedf_schedule_recovery_handler,
                .dcbx_aen = qedf_dcbx_handler,
                .get_generic_tlv_data = qedf_get_generic_tlv_data,
                .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
                .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
        }
};

/*
 * Various transport templates.
 */

static struct scsi_transport_template *qedf_fc_transport_template;
static struct scsi_transport_template *qedf_fc_vport_transport_template;

/*
 * SCSI EH handlers
 */
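/*
 * Abort handler flow: validate that the rport and io_req still match the
 * command, then issue an ABTS and wait for its completion event. If the
 * ABTS cannot even be queued, the command is returned to the midlayer
 * with DID_ERROR.
 */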
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        struct qedf_ioreq *io_req;
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct fc_rport_priv *rdata;
        struct qedf_rport *fcport = NULL;
        int rc = FAILED;
        int wait_count = 100;
        int refcount = 0;
        int rval;
        int got_ref = 0;

        lport = shost_priv(sc_cmd->device->host);
        qedf = (struct qedf_ctx *)lport_priv(lport);

        /* rport and fcport are allocated together, so fcport should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];
        rdata = fcport->rdata;
        if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
                QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
                rc = SUCCESS;
                goto out;
        }

        io_req = qedf_priv(sc_cmd)->io_req;
        if (!io_req) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
                         sc_cmd, sc_cmd->cmnd[0],
                         rdata->ids.port_id);
                rc = SUCCESS;
                goto drop_rdata_kref;
        }

        rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
        if (rval)
                got_ref = 1;

        /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
        if (!rval || io_req->sc_cmd != sc_cmd) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
                         io_req->sc_cmd, sc_cmd, rdata->ids.port_id);

                goto drop_rdata_kref;
        }

        if (fc_remote_port_chkready(rport)) {
                refcount = kref_read(&io_req->refcount);
                QEDF_ERR(&qedf->dbg_ctx,
                         "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
                         io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
                         refcount, rdata->ids.port_id);

                goto drop_rdata_kref;
        }

        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
                goto drop_rdata_kref;

        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Connection uploading, xid=0x%x, port_id=%06x\n",
                         io_req->xid, rdata->ids.port_id);
                while (io_req->sc_cmd && (wait_count != 0)) {
                        msleep(100);
                        wait_count--;
                }
                if (wait_count) {
                        QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
                        rc = SUCCESS;
                } else {
                        QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
                        rc = FAILED;
                }
                goto drop_rdata_kref;
        }

        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
                goto drop_rdata_kref;
        }

        QEDF_ERR(&qedf->dbg_ctx,
                 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
                 io_req, sc_cmd, io_req->xid, io_req->fp_idx,
                 rdata->ids.port_id);

        if (qedf->stop_io_on_error) {
                qedf_stop_all_io(qedf);
                rc = SUCCESS;
                goto drop_rdata_kref;
        }

        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval) {
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
                /*
                 * If we fail to queue the ABTS then return this command to
                 * the SCSI layer as it will own and free the xid.
                 */
                rc = SUCCESS;
                qedf_scsi_done(qedf, io_req, DID_ERROR);
                goto drop_rdata_kref;
        }

        wait_for_completion(&io_req->abts_done);

        if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
            io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
            io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
                /*
                 * If we get a response to the abort, this is a success from
                 * the perspective that all references to the command have
                 * been removed from the driver and firmware.
                 */
                rc = SUCCESS;
        } else {
                /* If the abort and cleanup failed then return a failure */
                rc = FAILED;
        }

        if (rc == SUCCESS)
                QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
                          io_req->xid);
        else
                QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
                          io_req->xid);

drop_rdata_kref:
        kref_put(&rdata->kref, fc_rport_destroy);
out:
        if (got_ref)
                kref_put(&io_req->refcount, qedf_release_cmd);
        return rc;
}

static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
        QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
                 sc_cmd->device->host->host_no, sc_cmd->device->id,
                 sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
        QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
                 sc_cmd->device->host->host_no, sc_cmd->device->id,
                 sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

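/*
 * Poll for up to 60 seconds (120 iterations of 500ms) until all offloaded
 * sessions have been uploaded; on timeout, log which fcports are still
 * pending.
 */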
bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
        struct qedf_rport *fcport;
        int wait_cnt = 120;

        while (wait_cnt--) {
                if (atomic_read(&qedf->num_offloads))
                        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                                  "Waiting for all uploads to complete num_offloads = 0x%x.\n",
                                  atomic_read(&qedf->num_offloads));
                else
                        return true;
                msleep(500);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
                if (test_bit(QEDF_RPORT_SESSION_READY,
                             &fcport->flags)) {
                        if (fcport->rdata)
                                QEDF_ERR(&qedf->dbg_ctx,
                                         "Waiting for fcport %p portid=%06x.\n",
                                         fcport, fcport->rdata->ids.port_id);
                        else
                                QEDF_ERR(&qedf->dbg_ctx,
                                         "Waiting for fcport %p.\n", fcport);
                }
        }
        rcu_read_unlock();

        return false;
}

/* Performs soft reset of qedf_ctx by simulating a link down/up */
void qedf_ctx_soft_reset(struct fc_lport *lport)
{
        struct qedf_ctx *qedf;
        struct qed_link_output if_link;

        if (lport->vport) {
                printk_ratelimited("Cannot issue host reset on NPIV port.\n");
                return;
        }

        qedf = lport_priv(lport);

        qedf->flogi_pending = 0;
        /* For host reset, essentially do a soft link up/down */
        atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                  "Queuing link down work.\n");
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);

        if (qedf_wait_for_upload(qedf) == false) {
                QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
                WARN_ON(atomic_read(&qedf->num_offloads));
        }

        /* Before setting link up query physical link state */
        qed_ops->common->get_link(qedf->cdev, &if_link);
        /* Bail if the physical link is not up */
        if (!if_link.link_up) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                          "Physical link is not up.\n");
                return;
        }
        /* Flush and wait to make sure link down is processed */
        flush_delayed_work(&qedf->link_update);
        msleep(500);

        atomic_set(&qedf->link_state, QEDF_LINK_UP);
        qedf->vlan_id = 0;
        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
                  "Queue link up work.\n");
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
}

/* Reset the host by gracefully logging out and then logging back in */
static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
        struct fc_lport *lport;
        struct qedf_ctx *qedf;

        lport = shost_priv(sc_cmd->device->host);
        qedf = lport_priv(lport);

        if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
            test_bit(QEDF_UNLOADING, &qedf->flags))
                return FAILED;

        QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");

        qedf_ctx_soft_reset(lport);

        return SUCCESS;
}

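/* Apply the optional module-parameter queue depth to each new LUN */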
static int qedf_slave_configure(struct scsi_device *sdev)
{
        if (qedf_queue_depth)
                scsi_change_queue_depth(sdev, qedf_queue_depth);

        return 0;
}

static struct scsi_host_template qedf_host_template = {
        .module         = THIS_MODULE,
        .name           = QEDF_MODULE_NAME,
        .this_id        = -1,
        .cmd_per_lun    = 32,
        .max_sectors    = 0xffff,
        .queuecommand   = qedf_queuecommand,
        .shost_groups   = qedf_host_groups,
        .eh_abort_handler       = qedf_eh_abort,
        .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
        .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
        .eh_host_reset_handler  = qedf_eh_host_reset,
        .slave_configure        = qedf_slave_configure,
        .dma_boundary = QED_HW_DMA_BOUNDARY,
        .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
        .can_queue = FCOE_PARAMS_NUM_TASKS,
        .change_queue_depth = scsi_change_queue_depth,
        .cmd_size = sizeof(struct qedf_cmd_priv),
};

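/*
 * Append the FCoE CRC/EOF trailer page to a non-linear skb, taking
 * qedf_global_lock to serialize access to the shared qedf_global state.
 */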
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
        int rc;

        spin_lock(&qedf_global_lock);
        rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
        spin_unlock(&qedf_global_lock);

        return rc;
}

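/*
 * Look up an offloaded fcport by 24-bit port ID under RCU. Note that no
 * reference is taken; the caller must cope with the fcport going away.
 */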
static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
{
        struct qedf_rport *fcport;
        struct fc_rport_priv *rdata;

        rcu_read_lock();
        list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
                rdata = fcport->rdata;
                if (rdata == NULL)
                        continue;
                if (rdata->ids.port_id == port_id) {
                        rcu_read_unlock();
                        return fcport;
                }
        }
        rcu_read_unlock();

        /* Return NULL to caller to let them know fcport was not found */
        return NULL;
}

/* Transmits an ELS frame over an offloaded session */
static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_frame_header *fh;
        int rc = 0;

        fh = fc_frame_header_get(fp);
        if ((fh->fh_type == FC_TYPE_ELS) &&
            (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
                switch (fc_frame_payload_op(fp)) {
                case ELS_ADISC:
                        qedf_send_adisc(fcport, fp);
                        rc = 1;
                        break;
                }
        }

        return rc;
}

/*
 * qedf_xmit - qedf FCoE frame transmit function
 */
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
        struct fc_lport         *base_lport;
        struct qedf_ctx         *qedf;
        struct ethhdr           *eh;
        struct fcoe_crc_eof     *cp;
        struct sk_buff          *skb;
        struct fc_frame_header  *fh;
        struct fcoe_hdr         *hp;
        u8                      sof, eof;
        u32                     crc;
        unsigned int            hlen, tlen, elen;
        int                     wlen;
        struct fc_lport *tmp_lport;
        struct fc_lport *vn_port = NULL;
        struct qedf_rport *fcport;
        int rc;
        u16 vlan_tci = 0;

        qedf = (struct qedf_ctx *)lport_priv(lport);

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        /* Filter out traffic to other NPIV ports on the same host */
        if (lport->vport)
                base_lport = shost_priv(vport_to_shost(lport->vport));
        else
                base_lport = lport;

        /* Flag if the destination is the base port */
        if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
                vn_port = base_lport;
        } else {
                /* Go through the list of vports attached to the base_lport
                 * and see if we have a match with the destination address.
                 */
                list_for_each_entry(tmp_lport, &base_lport->vports, list) {
                        if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
                                vn_port = tmp_lport;
                                break;
                        }
                }
        }
        if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
                struct fc_rport_priv *rdata = NULL;

                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
                    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
                kfree_skb(skb);
                rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
                if (rdata) {
                        rdata->retries = lport->max_rport_retry_count;
                        kref_put(&rdata->kref, fc_rport_destroy);
                }
                return -EINVAL;
        }
        /* End NPIV filtering */

        if (!qedf->ctlr.sel_fcf) {
                kfree_skb(skb);
                return 0;
        }

        if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
                QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
                kfree_skb(skb);
                return 0;
        }

        if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
                kfree_skb(skb);
                return 0;
        }

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
                if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
                        return 0;
        }

        /* Check to see if this needs to be sent on an offloaded session */
        fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

        if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                rc = qedf_xmit_l2_frame(fcport, fp);
                /*
                 * If the frame was successfully sent over the middle path
                 * then do not try to also send it over the LL2 path.
                 */
                if (rc)
                        return 0;
        }

        sof = fr_sof(fp);
        eof = fr_eof(fp);

        elen = sizeof(struct ethhdr);
        hlen = sizeof(struct fcoe_hdr);
        tlen = sizeof(struct fcoe_crc_eof);
        wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

        skb->ip_summed = CHECKSUM_NONE;
        crc = fcoe_fc_crc(fp);

        /* copy port crc and eof to the skb buff */
        if (skb_is_nonlinear(skb)) {
                skb_frag_t *frag;

                if (qedf_get_paged_crc_eof(skb, tlen)) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
                cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
        } else {
                cp = skb_put(skb, tlen);
        }

        memset(cp, 0, sizeof(*cp));
        cp->fcoe_eof = eof;
        cp->fcoe_crc32 = cpu_to_le32(~crc);
        if (skb_is_nonlinear(skb)) {
                kunmap_atomic(cp);
                cp = NULL;
        }

        /* adjust skb network/transport offsets to match mac/fcoe/port */
        skb_push(skb, elen + hlen);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);

        /*
         * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
         * for FIP/FCoE traffic.
         */
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);

        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
        eh->h_proto = htons(ETH_P_FCOE);
        if (qedf->ctlr.map_dest)
                fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
        else
                /* insert GW address */
                ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);

        /* Set the source MAC address */
        ether_addr_copy(eh->h_source, qedf->data_src_addr);

        hp = (struct fcoe_hdr *)(eh + 1);
        memset(hp, 0, sizeof(*hp));
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
        hp->fcoe_sof = sof;

        /* update tx stats */
        this_cpu_inc(lport->stats->TxFrames);
        this_cpu_add(lport->stats->TxWords, wlen);

        /* Get VLAN ID from skb for printing purposes */
        __vlan_hwaccel_get_tag(skb, &vlan_tci);

        /* send down to lld */
        fr_dev(fp) = lport;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
            "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
            ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
            vlan_tci);
        if (qedf_dump_frames)
                print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
                    1, skb->data, skb->len, false);
        rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
        if (rc) {
                QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
                kfree_skb(skb);
                return rc;
        }

        return 0;
}

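/*
 * Allocate the per-connection send queue and its page base list (PBL).
 * The PBL holds one 64-bit DMA address per SQ page, stored as lo/hi
 * 32-bit words, for the firmware to walk when locating the queue pages.
 */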
static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
        int rval = 0;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;

        /* Calculate appropriate queue and PBL sizes */
        fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
        fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
        fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
            sizeof(void *);
        fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;

        fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
                                        &fcport->sq_dma, GFP_KERNEL);
        if (!fcport->sq) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
                rval = 1;
                goto out;
        }

        fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
                                            fcport->sq_pbl_size,
                                            &fcport->sq_pbl_dma, GFP_KERNEL);
        if (!fcport->sq_pbl) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
                rval = 1;
                goto out_free_sq;
        }

        /* Create PBL */
        num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
        page = fcport->sq_dma;
        pbl = (u32 *)fcport->sq_pbl;

        while (num_pages--) {
                *pbl = U64_LO(page);
                pbl++;
                *pbl = U64_HI(page);
                pbl++;
                page += QEDF_PAGE_SIZE;
        }

        return rval;

out_free_sq:
        dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
            fcport->sq_dma);
out:
        return rval;
}

static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
        if (fcport->sq_pbl)
                dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
                    fcport->sq_pbl, fcport->sq_pbl_dma);
        if (fcport->sq)
                dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
                    fcport->sq, fcport->sq_dma);
}

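/*
 * Acquire a qed connection handle and populate qed_fcoe_params_offload
 * (queue addresses, MACs, VLAN tag, s_id/d_id, timers) before asking the
 * firmware to offload the session.
 */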
static int qedf_offload_connection(struct qedf_ctx *qedf,
        struct qedf_rport *fcport)
{
        struct qed_fcoe_params_offload conn_info;
        u32 port_id;
        int rval;
        uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
                   "portid=%06x.\n", fcport->rdata->ids.port_id);
        rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
            &fcport->fw_cid, &fcport->p_doorbell);
        if (rval) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
                           "for portid=%06x.\n", fcport->rdata->ids.port_id);
                rval = 1; /* For some reason qed returns 0 on failure here */
                goto out;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
                   "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
                   fcport->fw_cid, fcport->handle);

        memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));

        /* Fill in the offload connection info */
        conn_info.sq_pbl_addr = fcport->sq_pbl_dma;

        conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
        conn_info.sq_next_page_addr =
            (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));

        /* Need to use our FCoE MAC for the offload session */
        ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);

        ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);

        conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
        conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
        conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
        conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;

        /* Set VLAN data */
        conn_info.vlan_tag = qedf->vlan_id <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
        conn_info.vlan_tag |=
            qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
        conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);

        /* Set host port source id (addr_hi holds the low byte of the id) */
        port_id = fc_host_port_id(qedf->lport->host);
        fcport->sid = port_id;
        conn_info.s_id.addr_hi = (port_id & 0x000000FF);
        conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
        conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;

        conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;

        /* Set remote port destination id (same byte layout as s_id) */
        port_id = fcport->rdata->rport->port_id;
        conn_info.d_id.addr_hi = (port_id & 0x000000FF);
        conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
        conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;

        conn_info.def_q_idx = 0; /* Default index for send queue? */

        /* Set FC-TAPE specific flags if needed */
        if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
                    "Enable CONF, REC for portid=%06x.\n",
                    fcport->rdata->ids.port_id);
                conn_info.flags |= 1 <<
                    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
                conn_info.flags |=
                    ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
                    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
        }

        rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
        if (rval) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
                           "for portid=%06x.\n", fcport->rdata->ids.port_id);
                goto out_free_conn;
        } else
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
                           "succeeded portid=%06x total_sqe=%d.\n",
                           fcport->rdata->ids.port_id, total_sqe);

        spin_lock_init(&fcport->rport_lock);
        atomic_set(&fcport->free_sqes, total_sqe);
        return 0;
out_free_conn:
        qed_ops->release_conn(qedf->cdev, fcport->handle);
out:
        return rval;
}

1403#define QEDF_TERM_BUFF_SIZE             10
1404static void qedf_upload_connection(struct qedf_ctx *qedf,
1405        struct qedf_rport *fcport)
1406{
1407        void *term_params;
1408        dma_addr_t term_params_dma;
1409
        /* The termination parameters need to be a DMA-coherent buffer
         * because qed shares the physical DMA address with the firmware.
         * The buffer may be used in the receive path, so we may eventually
         * have to move this.
         */
1414        term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1415                &term_params_dma, GFP_KERNEL);
1416        if (!term_params)
1417                return;
1418
1419        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1420                   "port_id=%06x.\n", fcport->rdata->ids.port_id);
1421
1422        qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1423        qed_ops->release_conn(qedf->cdev, fcport->handle);
1424
1425        dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1426            term_params_dma);
1427}
1428
1429static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1430        struct qedf_rport *fcport)
1431{
1432        struct fc_rport_priv *rdata = fcport->rdata;
1433
1434        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1435            fcport->rdata->ids.port_id);
1436
        /* Flush any remaining I/Os before we upload the connection */
1438        qedf_flush_active_ios(fcport, -1);
1439
1440        if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
1441                qedf_upload_connection(qedf, fcport);
1442        qedf_free_sq(qedf, fcport);
1443        fcport->rdata = NULL;
1444        fcport->qedf = NULL;
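        /* Drop the rdata reference taken when the session was offloaded */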
1445        kref_put(&rdata->kref, fc_rport_destroy);
1446}
1447
/*
 * This event_callback is called after successful completion of a
 * libfc-initiated target login. qedf can then proceed with initiating
 * the session establishment.
 */
1453static void qedf_rport_event_handler(struct fc_lport *lport,
1454                                struct fc_rport_priv *rdata,
1455                                enum fc_rport_event event)
1456{
1457        struct qedf_ctx *qedf = lport_priv(lport);
1458        struct fc_rport *rport = rdata->rport;
1459        struct fc_rport_libfc_priv *rp;
1460        struct qedf_rport *fcport;
1461        u32 port_id;
1462        int rval;
1463        unsigned long flags;
1464
1465        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1466                   "port_id = 0x%x\n", event, rdata->ids.port_id);
1467
1468        switch (event) {
1469        case RPORT_EV_READY:
1470                if (!rport) {
1471                        QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1472                        break;
1473                }
1474
1475                rp = rport->dd_data;
1476                fcport = (struct qedf_rport *)&rp[1];
1477                fcport->qedf = qedf;
1478
1479                if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1480                        QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1481                            "portid=0x%x as max number of offloaded sessions "
1482                            "reached.\n", rdata->ids.port_id);
1483                        return;
1484                }
1485
                /*
                 * Don't try to offload the session again. This can happen
                 * when we get an ADISC.
                 */
1490                if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1491                        QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1492                                   "offloaded, portid=0x%x.\n",
1493                                   rdata->ids.port_id);
1494                        return;
1495                }
1496
1497                if (rport->port_id == FC_FID_DIR_SERV) {
1498                        /*
1499                         * qedf_rport structure doesn't exist for
1500                         * directory server.
1501                         * We should not come here, as lport will
1502                         * take care of fabric login
1503                         */
1504                        QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1505                            "exist for dir server port_id=%x\n",
1506                            rdata->ids.port_id);
1507                        break;
1508                }
1509
1510                if (rdata->spp_type != FC_TYPE_FCP) {
1511                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1512                            "Not offloading since spp type isn't FCP\n");
1513                        break;
1514                }
1515                if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1516                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1517                            "Not FCP target so not offloading\n");
1518                        break;
1519                }
1520
1521                /* Initial reference held on entry, so this can't fail */
1522                kref_get(&rdata->kref);
1523                fcport->rdata = rdata;
1524                fcport->rport = rport;
1525
1526                rval = qedf_alloc_sq(qedf, fcport);
1527                if (rval) {
1528                        qedf_cleanup_fcport(qedf, fcport);
1529                        break;
1530                }
1531
1532                /* Set device type */
1533                if (rdata->flags & FC_RP_FLAGS_RETRY &&
1534                    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
1535                    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
1536                        fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
1537                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1538                            "portid=%06x is a TAPE device.\n",
1539                            rdata->ids.port_id);
1540                } else {
1541                        fcport->dev_type = QEDF_RPORT_TYPE_DISK;
1542                }
1543
1544                rval = qedf_offload_connection(qedf, fcport);
1545                if (rval) {
1546                        qedf_cleanup_fcport(qedf, fcport);
1547                        break;
1548                }
1549
                /* Add fcport to the qedf_ctx list of offloaded ports */
1551                spin_lock_irqsave(&qedf->hba_lock, flags);
1552                list_add_rcu(&fcport->peers, &qedf->fcports);
1553                spin_unlock_irqrestore(&qedf->hba_lock, flags);
1554
1555                /*
1556                 * Set the session ready bit to let everyone know that this
1557                 * connection is ready for I/O
1558                 */
1559                set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
1560                atomic_inc(&qedf->num_offloads);
1561
1562                break;
1563        case RPORT_EV_LOGO:
1564        case RPORT_EV_FAILED:
1565        case RPORT_EV_STOP:
1566                port_id = rdata->ids.port_id;
1567                if (port_id == FC_FID_DIR_SERV)
1568                        break;
1569
1570                if (rdata->spp_type != FC_TYPE_FCP) {
1571                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1572                            "No action since spp type isn't FCP\n");
1573                        break;
1574                }
1575                if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
1576                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1577                            "Not FCP target so no action\n");
1578                        break;
1579                }
1580
1581                if (!rport) {
1582                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "port_id=%x - rport not created yet.\n", port_id);
1584                        break;
1585                }
1586                rp = rport->dd_data;
1587                /*
1588                 * Perform session upload. Note that rdata->peers is already
1589                 * removed from disc->rports list before we get this event.
1590                 */
1591                fcport = (struct qedf_rport *)&rp[1];
1592
1593                spin_lock_irqsave(&fcport->rport_lock, flags);
1594                /* Only free this fcport if it is offloaded already */
1595                if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
1596                    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1597                    &fcport->flags)) {
1598                        set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1599                                &fcport->flags);
1600                        spin_unlock_irqrestore(&fcport->rport_lock, flags);
1601                        qedf_cleanup_fcport(qedf, fcport);
                        /*
                         * Remove fcport from the qedf_ctx list of
                         * offloaded ports.
                         */
1606                        spin_lock_irqsave(&qedf->hba_lock, flags);
1607                        list_del_rcu(&fcport->peers);
1608                        spin_unlock_irqrestore(&qedf->hba_lock, flags);
1609
1610                        clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
1611                            &fcport->flags);
1612                        atomic_dec(&qedf->num_offloads);
1613                } else {
1614                        spin_unlock_irqrestore(&fcport->rport_lock, flags);
1615                }
1616                break;
1617
1618        case RPORT_EV_NONE:
1619                break;
1620        }
1621}
1622
1623static void qedf_abort_io(struct fc_lport *lport)
1624{
1625        /* NO-OP but need to fill in the template */
1626}
1627
1628static void qedf_fcp_cleanup(struct fc_lport *lport)
1629{
1630        /*
1631         * NO-OP but need to fill in template to prevent a NULL
1632         * function pointer dereference during link down. I/Os
1633         * will be flushed when port is uploaded.
1634         */
1635}
1636
1637static struct libfc_function_template qedf_lport_template = {
1638        .frame_send             = qedf_xmit,
1639        .fcp_abort_io           = qedf_abort_io,
1640        .fcp_cleanup            = qedf_fcp_cleanup,
1641        .rport_event_callback   = qedf_rport_event_handler,
1642        .elsct_send             = qedf_elsct_send,
1643};
1644
1645static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1646{
1647        fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
1648
1649        qedf->ctlr.send = qedf_fip_send;
1650        qedf->ctlr.get_src_addr = qedf_get_src_mac;
1651        ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1652}
1653
1654static void qedf_setup_fdmi(struct qedf_ctx *qedf)
1655{
1656        struct fc_lport *lport = qedf->lport;
1657        u8 buf[8];
1658        int pos;
1659        uint32_t i;
1660
1661        /*
1662         * fdmi_enabled needs to be set for libfc
1663         * to execute FDMI registration
1664         */
1665        lport->fdmi_enabled = 1;
1666
        /*
         * Set up the fc_host attributes that will be used to fill in the
         * FDMI information.
         */
1671
1672        /* Get the PCI-e Device Serial Number Capability */
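        /*
         * The 4-byte DSN capability header precedes the serial number, so
         * reads start at pos + 4. Config space is little-endian, which is
         * presumably why the bytes are printed in reverse (buf[7]..buf[0])
         * to get an MSB-first string.
         */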
1673        pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
1674        if (pos) {
1675                pos += 4;
1676                for (i = 0; i < 8; i++)
1677                        pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
1678
1679                snprintf(fc_host_serial_number(lport->host),
1680                    FC_SERIAL_NUMBER_SIZE,
1681                    "%02X%02X%02X%02X%02X%02X%02X%02X",
1682                    buf[7], buf[6], buf[5], buf[4],
1683                    buf[3], buf[2], buf[1], buf[0]);
1684        } else
1685                snprintf(fc_host_serial_number(lport->host),
1686                    FC_SERIAL_NUMBER_SIZE, "Unknown");
1687
1688        snprintf(fc_host_manufacturer(lport->host),
1689            FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
1690
1691        if (qedf->pdev->device == QL45xxx) {
1692                snprintf(fc_host_model(lport->host),
1693                        FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
1694
1695                snprintf(fc_host_model_description(lport->host),
1696                        FC_SYMBOLIC_NAME_SIZE, "%s",
1697                        "Marvell FastLinQ QL45xxx FCoE Adapter");
1698        }
1699
1700        if (qedf->pdev->device == QL41xxx) {
1701                snprintf(fc_host_model(lport->host),
1702                        FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
1703
1704                snprintf(fc_host_model_description(lport->host),
1705                        FC_SYMBOLIC_NAME_SIZE, "%s",
1706                        "Marvell FastLinQ QL41xxx FCoE Adapter");
1707        }
1708
1709        snprintf(fc_host_hardware_version(lport->host),
1710            FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
1711
1712        snprintf(fc_host_driver_version(lport->host),
1713            FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
1714
1715        snprintf(fc_host_firmware_version(lport->host),
1716            FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
1717            FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
1718            FW_ENGINEERING_VERSION);
1719
1720        snprintf(fc_host_vendor_identifier(lport->host),
1721                FC_VENDOR_IDENTIFIER, "%s", "Marvell");
1722
1723}
1724
1725static int qedf_lport_setup(struct qedf_ctx *qedf)
1726{
1727        struct fc_lport *lport = qedf->lport;
1728
1729        lport->link_up = 0;
1730        lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1731        lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1732        lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1733            FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1734        lport->boot_time = jiffies;
1735        lport->e_d_tov = 2 * 1000;
1736        lport->r_a_tov = 10 * 1000;
1737
1738        /* Set NPIV support */
1739        lport->does_npiv = 1;
1740        fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
1741
1742        fc_set_wwnn(lport, qedf->wwnn);
1743        fc_set_wwpn(lport, qedf->wwpn);
1744
1745        if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
1746                QEDF_ERR(&qedf->dbg_ctx,
1747                         "fcoe_libfc_config failed.\n");
1748                return -ENOMEM;
1749        }
1750
1751        /* Allocate the exchange manager */
1752        fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
1753                          0xfffe, NULL);
1754
1755        if (fc_lport_init_stats(lport))
1756                return -ENOMEM;
1757
1758        /* Finish lport config */
1759        fc_lport_config(lport);
1760
1761        /* Set max frame size */
1762        fc_set_mfs(lport, QEDF_MFS);
1763        fc_host_maxframe_size(lport->host) = lport->mfs;
1764
1765        /* Set default dev_loss_tmo based on module parameter */
1766        fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
1767
1768        /* Set symbolic node name */
1769        if (qedf->pdev->device == QL45xxx)
1770                snprintf(fc_host_symbolic_name(lport->host), 256,
1771                        "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
1772
1773        if (qedf->pdev->device == QL41xxx)
1774                snprintf(fc_host_symbolic_name(lport->host), 256,
1775                        "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
1776
1777        qedf_setup_fdmi(qedf);
1778
1779        return 0;
1780}
1781
1782/*
1783 * NPIV functions
1784 */
1785
1786static int qedf_vport_libfc_config(struct fc_vport *vport,
1787        struct fc_lport *lport)
1788{
1789        lport->link_up = 0;
1790        lport->qfull = 0;
1791        lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1792        lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1793        lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1794            FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1795        lport->boot_time = jiffies;
1796        lport->e_d_tov = 2 * 1000;
1797        lport->r_a_tov = 10 * 1000;
1798        lport->does_npiv = 1; /* Temporary until we add NPIV support */
1799
1800        /* Allocate stats for vport */
1801        if (fc_lport_init_stats(lport))
1802                return -ENOMEM;
1803
1804        /* Finish lport config */
1805        fc_lport_config(lport);
1806
1807        /* offload related configuration */
1808        lport->crc_offload = 0;
1809        lport->seq_offload = 0;
1810        lport->lro_enabled = 0;
1811        lport->lro_xid = 0;
1812        lport->lso_max = 0;
1813
1814        return 0;
1815}
1816
1817static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1818{
1819        struct Scsi_Host *shost = vport_to_shost(vport);
1820        struct fc_lport *n_port = shost_priv(shost);
1821        struct fc_lport *vn_port;
1822        struct qedf_ctx *base_qedf = lport_priv(n_port);
1823        struct qedf_ctx *vport_qedf;
1824
1825        char buf[32];
1826        int rc = 0;
1827
1828        rc = fcoe_validate_vport_create(vport);
1829        if (rc) {
1830                fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1831                QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1832                           "WWPN (0x%s) already exists.\n", buf);
1833                return rc;
1834        }
1835
1836        if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1837                QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1838                           "because link is not up.\n");
1839                return -EIO;
1840        }
1841
1842        vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1843        if (!vn_port) {
1844                QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1845                           "for vport.\n");
1846                return -ENOMEM;
1847        }
1848
1849        fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1850        QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1851            buf);
1852
1853        /* Copy some fields from base_qedf */
1854        vport_qedf = lport_priv(vn_port);
1855        memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
1856
1857        /* Set qedf data specific to this vport */
1858        vport_qedf->lport = vn_port;
1859        /* Use same hba_lock as base_qedf */
1860        vport_qedf->hba_lock = base_qedf->hba_lock;
1861        vport_qedf->pdev = base_qedf->pdev;
1862        vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1863        init_completion(&vport_qedf->flogi_compl);
1864        INIT_LIST_HEAD(&vport_qedf->fcports);
1865        INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
1866
1867        rc = qedf_vport_libfc_config(vport, vn_port);
1868        if (rc) {
1869                QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1870                    "for lport stats.\n");
1871                goto err;
1872        }
1873
1874        fc_set_wwnn(vn_port, vport->node_name);
1875        fc_set_wwpn(vn_port, vport->port_name);
1876        vport_qedf->wwnn = vn_port->wwnn;
1877        vport_qedf->wwpn = vn_port->wwpn;
1878
1879        vn_port->host->transportt = qedf_fc_vport_transport_template;
1880        vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
1881        vn_port->host->max_lun = qedf_max_lun;
1882        vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1883        vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1884        vn_port->host->max_id = QEDF_MAX_SESSIONS;
1885
1886        rc = scsi_add_host(vn_port->host, &vport->dev);
1887        if (rc) {
1888                QEDF_WARN(&base_qedf->dbg_ctx,
1889                          "Error adding Scsi_Host rc=0x%x.\n", rc);
1890                goto err;
1891        }
1892
1893        /* Set default dev_loss_tmo based on module parameter */
1894        fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1895
        /* Init libfc internals */
1897        memcpy(&vn_port->tt, &qedf_lport_template,
1898                sizeof(qedf_lport_template));
1899        fc_exch_init(vn_port);
1900        fc_elsct_init(vn_port);
1901        fc_lport_init(vn_port);
1902        fc_disc_init(vn_port);
1903        fc_disc_config(vn_port, vn_port);
1906        /* Allocate the exchange manager */
1907        shost = vport_to_shost(vport);
1908        n_port = shost_priv(shost);
1909        fc_exch_mgr_list_clone(n_port, vn_port);
1910
1911        /* Set max frame size */
1912        fc_set_mfs(vn_port, QEDF_MFS);
1913
1914        fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
1915
1916        if (disabled) {
1917                fc_vport_set_state(vport, FC_VPORT_DISABLED);
1918        } else {
1919                vn_port->boot_time = jiffies;
1920                fc_fabric_login(vn_port);
1921                fc_vport_setlink(vn_port);
1922        }
1923
1924        QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
1925                   vn_port);
1926
1927        /* Set up debug context for vport */
1928        vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1929        vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
1930
1931        return 0;
1932
1933err:
1934        scsi_host_put(vn_port->host);
1935        return rc;
1936}
1937
1938static int qedf_vport_destroy(struct fc_vport *vport)
1939{
1940        struct Scsi_Host *shost = vport_to_shost(vport);
1941        struct fc_lport *n_port = shost_priv(shost);
1942        struct fc_lport *vn_port = vport->dd_data;
1943        struct qedf_ctx *qedf = lport_priv(vn_port);
1944
1945        if (!qedf) {
1946                QEDF_ERR(NULL, "qedf is NULL.\n");
1947                goto out;
1948        }
1949
1950        /* Set unloading bit on vport qedf_ctx to prevent more I/O */
1951        set_bit(QEDF_UNLOADING, &qedf->flags);
1952
1953        mutex_lock(&n_port->lp_mutex);
1954        list_del(&vn_port->list);
1955        mutex_unlock(&n_port->lp_mutex);
1956
1957        fc_fabric_logoff(vn_port);
1958        fc_lport_destroy(vn_port);
1959
1960        /* Detach from scsi-ml */
1961        fc_remove_host(vn_port->host);
1962        scsi_remove_host(vn_port->host);
1963
1964        /*
1965         * Only try to release the exchange manager if the vn_port
1966         * configuration is complete.
1967         */
1968        if (vn_port->state == LPORT_ST_READY)
1969                fc_exch_mgr_free(vn_port);
1970
1971        /* Free memory used by statistical counters */
1972        fc_lport_free_stats(vn_port);
1973
1974        /* Release Scsi_Host */
1975        scsi_host_put(vn_port->host);
1976
1977out:
1978        return 0;
1979}
1980
1981static int qedf_vport_disable(struct fc_vport *vport, bool disable)
1982{
1983        struct fc_lport *lport = vport->dd_data;
1984
1985        if (disable) {
1986                fc_vport_set_state(vport, FC_VPORT_DISABLED);
1987                fc_fabric_logoff(lport);
1988        } else {
1989                lport->boot_time = jiffies;
1990                fc_fabric_login(lport);
1991                fc_vport_setlink(lport);
1992        }
1993        return 0;
1994}
1995
1996/*
1997 * During removal we need to wait for all the vports associated with a port
1998 * to be destroyed so we avoid a race condition where libfc is still trying
1999 * to reap vports while the driver remove function has already reaped the
2000 * driver contexts associated with the physical port.
2001 */
2002static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
2003{
2004        struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
2005
2006        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2007            "Entered.\n");
2008        while (fc_host->npiv_vports_inuse > 0) {
2009                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2010                    "Waiting for all vports to be reaped.\n");
2011                msleep(1000);
2012        }
2013}
2014
/**
 * qedf_fcoe_reset - Reset the FCoE context for a Scsi_Host
 *
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
2022static int qedf_fcoe_reset(struct Scsi_Host *shost)
2023{
2024        struct fc_lport *lport = shost_priv(shost);
2025
2026        qedf_ctx_soft_reset(lport);
2027        return 0;
2028}
2029
2030static void qedf_get_host_port_id(struct Scsi_Host *shost)
2031{
2032        struct fc_lport *lport = shost_priv(shost);
2033
2034        fc_host_port_id(shost) = lport->port_id;
2035}
2036
2037static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
2038        *shost)
2039{
2040        struct fc_host_statistics *qedf_stats;
2041        struct fc_lport *lport = shost_priv(shost);
2042        struct qedf_ctx *qedf = lport_priv(lport);
2043        struct qed_fcoe_stats *fw_fcoe_stats;
2044
2045        qedf_stats = fc_get_host_stats(shost);
2046
2047        /* We don't collect offload stats for specific NPIV ports */
2048        if (lport->vport)
2049                goto out;
2050
2051        fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
2052        if (!fw_fcoe_stats) {
2053                QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
2054                    "fw_fcoe_stats.\n");
2055                goto out;
2056        }
2057
2058        mutex_lock(&qedf->stats_mutex);
2059
2060        /* Query firmware for offload stats */
2061        qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
2062
        /*
         * libfc recomputes its statistics on every fc_get_host_stats
         * callback, so our offload stats must be added to the libfc
         * totals on each invocation; the additions are not carried over
         * between calls.
         */
2069        qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
2070            fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
2071            fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
2072        qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
2073            fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
2074            fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
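        /*
         * do_div() divides its 64-bit dividend in place and returns the
         * remainder, so consume the raw byte counters for the 4-byte FC
         * word totals before scaling them down to megabytes.
         */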
        qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
        qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
        do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
        qedf_stats->fcp_input_megabytes += fw_fcoe_stats->fcoe_rx_byte_cnt;
        do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
        qedf_stats->fcp_output_megabytes += fw_fcoe_stats->fcoe_tx_byte_cnt;
2081        qedf_stats->invalid_crc_count +=
2082            fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
2083        qedf_stats->dumped_frames =
2084            fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2085        qedf_stats->error_frames +=
2086            fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
2087        qedf_stats->fcp_input_requests += qedf->input_requests;
2088        qedf_stats->fcp_output_requests += qedf->output_requests;
2089        qedf_stats->fcp_control_requests += qedf->control_requests;
2090        qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
2091        qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
2092
2093        mutex_unlock(&qedf->stats_mutex);
2094        kfree(fw_fcoe_stats);
2095out:
2096        return qedf_stats;
2097}
2098
2099static struct fc_function_template qedf_fc_transport_fn = {
2100        .show_host_node_name = 1,
2101        .show_host_port_name = 1,
2102        .show_host_supported_classes = 1,
2103        .show_host_supported_fc4s = 1,
2104        .show_host_active_fc4s = 1,
2105        .show_host_maxframe_size = 1,
2106
2107        .get_host_port_id = qedf_get_host_port_id,
2108        .show_host_port_id = 1,
2109        .show_host_supported_speeds = 1,
2110        .get_host_speed = fc_get_host_speed,
2111        .show_host_speed = 1,
2112        .show_host_port_type = 1,
2113        .get_host_port_state = fc_get_host_port_state,
2114        .show_host_port_state = 1,
2115        .show_host_symbolic_name = 1,
2116
        /*
         * Tell the FC transport to allocate enough space to store the
         * backpointer for the associated qedf_rport struct.
         */
2121        .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2122                                sizeof(struct qedf_rport)),
2123        .show_rport_maxframe_size = 1,
2124        .show_rport_supported_classes = 1,
2125        .show_host_fabric_name = 1,
2126        .show_starget_node_name = 1,
2127        .show_starget_port_name = 1,
2128        .show_starget_port_id = 1,
2129        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2130        .show_rport_dev_loss_tmo = 1,
2131        .get_fc_host_stats = qedf_fc_get_host_stats,
2132        .issue_fc_host_lip = qedf_fcoe_reset,
2133        .vport_create = qedf_vport_create,
2134        .vport_delete = qedf_vport_destroy,
2135        .vport_disable = qedf_vport_disable,
2136        .bsg_request = fc_lport_bsg_request,
2137};
2138
2139static struct fc_function_template qedf_fc_vport_transport_fn = {
2140        .show_host_node_name = 1,
2141        .show_host_port_name = 1,
2142        .show_host_supported_classes = 1,
2143        .show_host_supported_fc4s = 1,
2144        .show_host_active_fc4s = 1,
2145        .show_host_maxframe_size = 1,
2146        .show_host_port_id = 1,
2147        .show_host_supported_speeds = 1,
2148        .get_host_speed = fc_get_host_speed,
2149        .show_host_speed = 1,
2150        .show_host_port_type = 1,
2151        .get_host_port_state = fc_get_host_port_state,
2152        .show_host_port_state = 1,
2153        .show_host_symbolic_name = 1,
2154        .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
2155                                sizeof(struct qedf_rport)),
2156        .show_rport_maxframe_size = 1,
2157        .show_rport_supported_classes = 1,
2158        .show_host_fabric_name = 1,
2159        .show_starget_node_name = 1,
2160        .show_starget_port_name = 1,
2161        .show_starget_port_id = 1,
2162        .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
2163        .show_rport_dev_loss_tmo = 1,
2164        .get_fc_host_stats = fc_get_host_stats,
2165        .issue_fc_host_lip = qedf_fcoe_reset,
2166        .bsg_request = fc_lport_bsg_request,
2167};
2168
2169static bool qedf_fp_has_work(struct qedf_fastpath *fp)
2170{
2171        struct qedf_ctx *qedf = fp->qedf;
2172        struct global_queue *que;
2173        struct qed_sb_info *sb_info = fp->sb_info;
2174        struct status_block *sb = sb_info->sb_virt;
2175        u16 prod_idx;
2176
2177        /* Get the pointer to the global CQ this completion is on */
2178        que = qedf->global_queues[fp->sb_id];
2179
2180        /* Be sure all responses have been written to PI */
2181        rmb();
2182
2183        /* Get the current firmware producer index */
2184        prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2185
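        /* New completions exist if the firmware producer index has moved
         * past the snapshot we saved in que->cq_prod_idx.
         */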
2186        return (que->cq_prod_idx != prod_idx);
2187}
2188
2189/*
2190 * Interrupt handler code.
2191 */
2192
/* Process the completion queue and copy CQE contents for deferred
 * processing.
 *
 * Return true if we should wake the I/O thread, false if not.
 */
2197static bool qedf_process_completions(struct qedf_fastpath *fp)
2198{
2199        struct qedf_ctx *qedf = fp->qedf;
2200        struct qed_sb_info *sb_info = fp->sb_info;
2201        struct status_block *sb = sb_info->sb_virt;
2202        struct global_queue *que;
2203        u16 prod_idx;
2204        struct fcoe_cqe *cqe;
2205        struct qedf_io_work *io_work;
2206        int num_handled = 0;
2207        unsigned int cpu;
2208        struct qedf_ioreq *io_req = NULL;
2209        u16 xid;
2210        u16 new_cqes;
2211        u32 comp_type;
2212
2213        /* Get the current firmware producer index */
2214        prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
2215
2216        /* Get the pointer to the global CQ this completion is on */
2217        que = qedf->global_queues[fp->sb_id];
2218
2219        /* Calculate the amount of new elements since last processing */
2220        new_cqes = (prod_idx >= que->cq_prod_idx) ?
2221            (prod_idx - que->cq_prod_idx) :
2222            0x10000 - que->cq_prod_idx + prod_idx;
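        /*
         * The producer index is 16 bits wide, so the subtraction must
         * handle wrap-around: e.g. a saved index of 0xfffb and a new
         * producer index of 0x0005 yields 0x10000 - 0xfffb + 0x0005 = 10
         * new CQEs.
         */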
2223
2224        /* Save producer index */
2225        que->cq_prod_idx = prod_idx;
2226
2227        while (new_cqes) {
2228                fp->completions++;
2229                num_handled++;
2230                cqe = &que->cq[que->cq_cons_idx];
2231
2232                comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2233                    FCOE_CQE_CQE_TYPE_MASK;
2234
                /*
                 * Process unsolicited CQEs directly in the interrupt handler
                 * since we need the fastpath ID.
                 */
                if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
                           "Unsolicited CQE.\n");
                        qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
                        /*
                         * Don't add a work list item.  Increment the
                         * consumer index and move on.
                         */
2247                        goto inc_idx;
2248                }
2249
2250                xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2251                io_req = &qedf->cmd_mgr->cmds[xid];
2252
2253                /*
2254                 * Figure out which percpu thread we should queue this I/O
2255                 * on.
2256                 */
2257                if (!io_req)
                        /* If there is no io_req associated with this CQE,
                         * just queue it on CPU 0.
                         */
2261                        cpu = 0;
2262                else {
2263                        cpu = io_req->cpu;
2264                        io_req->int_cpu = smp_processor_id();
2265                }
2266
2267                io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2268                if (!io_work) {
2269                        QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2270                                   "work for I/O completion.\n");
2271                        continue;
2272                }
2273                memset(io_work, 0, sizeof(struct qedf_io_work));
2274
2275                INIT_WORK(&io_work->work, qedf_fp_io_handler);
2276
2277                /* Copy contents of CQE for deferred processing */
2278                memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2279
2280                io_work->qedf = fp->qedf;
2281                io_work->fp = NULL; /* Only used for unsolicited frames */
2282
2283                queue_work_on(cpu, qedf_io_wq, &io_work->work);
2284
2285inc_idx:
2286                que->cq_cons_idx++;
2287                if (que->cq_cons_idx == fp->cq_num_entries)
2288                        que->cq_cons_idx = 0;
2289                new_cqes--;
2290        }
2291
2292        return true;
2293}
2296/* MSI-X fastpath handler code */
2297static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
2298{
2299        struct qedf_fastpath *fp = dev_id;
2300
2301        if (!fp) {
2302                QEDF_ERR(NULL, "fp is null.\n");
2303                return IRQ_HANDLED;
2304        }
2305        if (!fp->sb_info) {
                QEDF_ERR(NULL, "fp->sb_info is null.\n");
2307                return IRQ_HANDLED;
2308        }
2309
2310        /*
2311         * Disable interrupts for this status block while we process new
2312         * completions
2313         */
2314        qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
2315
2316        while (1) {
2317                qedf_process_completions(fp);
2318
2319                if (qedf_fp_has_work(fp) == 0) {
2320                        /* Update the sb information */
2321                        qed_sb_update_sb_idx(fp->sb_info);
2322
2323                        /* Check for more work */
2324                        rmb();
2325
2326                        if (qedf_fp_has_work(fp) == 0) {
2327                                /* Re-enable interrupts */
2328                                qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
2329                                return IRQ_HANDLED;
2330                        }
2331                }
2332        }
2333
2334        /* Do we ever want to break out of above loop? */
2335        return IRQ_HANDLED;
2336}
2337
2338/* simd handler for MSI/INTa */
2339static void qedf_simd_int_handler(void *cookie)
2340{
2341        /* Cookie is qedf_ctx struct */
2342        struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2343
2344        QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2345}
2346
2347#define QEDF_SIMD_HANDLER_NUM           0
2348static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2349{
2350        int i;
2351        u16 vector_idx = 0;
2352        u32 vector;
2353
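        /*
         * On two-hwfn (e.g. 100G) devices the MSI-X vectors are
         * presumably interleaved per hwfn, so each queue's vector index
         * is scaled by num_hwfns and offset by the affinitized hwfn
         * index that qed reports.
         */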
2354        if (qedf->int_info.msix_cnt) {
2355                for (i = 0; i < qedf->int_info.used_cnt; i++) {
2356                        vector_idx = i * qedf->dev_info.common.num_hwfns +
2357                                qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2358                        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2359                                  "Freeing IRQ #%d vector_idx=%d.\n",
2360                                  i, vector_idx);
2361                        vector = qedf->int_info.msix[vector_idx].vector;
2362                        synchronize_irq(vector);
2363                        irq_set_affinity_hint(vector, NULL);
2364                        irq_set_affinity_notifier(vector, NULL);
2365                        free_irq(vector, &qedf->fp_array[i]);
2366                }
2367        } else
2368                qed_ops->common->simd_handler_clean(qedf->cdev,
2369                    QEDF_SIMD_HANDLER_NUM);
2370
2371        qedf->int_info.used_cnt = 0;
2372        qed_ops->common->set_fp_int(qedf->cdev, 0);
2373}
2374
2375static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2376{
2377        int i, rc, cpu;
2378        u16 vector_idx = 0;
2379        u32 vector;
2380
2381        cpu = cpumask_first(cpu_online_mask);
2382        for (i = 0; i < qedf->num_queues; i++) {
2383                vector_idx = i * qedf->dev_info.common.num_hwfns +
2384                        qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2385                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2386                          "Requesting IRQ #%d vector_idx=%d.\n",
2387                          i, vector_idx);
2388                vector = qedf->int_info.msix[vector_idx].vector;
2389                rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
2390                                 &qedf->fp_array[i]);
2391
2392                if (rc) {
2393                        QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2394                        qedf_sync_free_irqs(qedf);
2395                        return rc;
2396                }
2397
2398                qedf->int_info.used_cnt++;
2399                rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
2400                cpu = cpumask_next(cpu, cpu_online_mask);
2401        }
2402
2403        return 0;
2404}
2405
2406static int qedf_setup_int(struct qedf_ctx *qedf)
2407{
2408        int rc = 0;
2409
2410        /*
2411         * Learn interrupt configuration
2412         */
2413        rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
        if (rc <= 0)
                return -EINVAL;
2416
2417        rc  = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
        if (rc)
                return -EINVAL;
2420
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "msix_cnt = 0x%x, "
                   "number of online cpus = 0x%x.\n", qedf->int_info.msix_cnt,
                   num_online_cpus());
2424
2425        if (qedf->int_info.msix_cnt)
2426                return qedf_request_msix_irq(qedf);
2427
2428        qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2429            QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
2430        qedf->int_info.used_cnt = 1;
2431
2432        QEDF_ERR(&qedf->dbg_ctx,
2433                 "Cannot load driver due to a lack of MSI-X vectors.\n");
2434        return -EINVAL;
2435}
2436
2437/* Main function for libfc frame reception */
2438static void qedf_recv_frame(struct qedf_ctx *qedf,
2439        struct sk_buff *skb)
2440{
2441        u32 fr_len;
2442        struct fc_lport *lport;
2443        struct fc_frame_header *fh;
2444        struct fcoe_crc_eof crc_eof;
2445        struct fc_frame *fp;
2446        u8 *mac = NULL;
2447        u8 *dest_mac = NULL;
2448        struct fcoe_hdr *hp;
2449        struct qedf_rport *fcport;
2450        struct fc_lport *vn_port;
2451        u32 f_ctl;
2452
2453        lport = qedf->lport;
2454        if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
2455                QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
2456                kfree_skb(skb);
2457                return;
2458        }
2459
2460        if (skb_is_nonlinear(skb))
2461                skb_linearize(skb);
2462        mac = eth_hdr(skb)->h_source;
2463        dest_mac = eth_hdr(skb)->h_dest;
2464
2465        /* Pull the header */
2466        hp = (struct fcoe_hdr *)skb->data;
2467        fh = (struct fc_frame_header *) skb_transport_header(skb);
2468        skb_pull(skb, sizeof(struct fcoe_hdr));
2469        fr_len = skb->len - sizeof(struct fcoe_crc_eof);
2470
2471        fp = (struct fc_frame *)skb;
2472        fc_frame_init(fp);
2473        fr_dev(fp) = lport;
2474        fr_sof(fp) = hp->fcoe_sof;
2475        if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
2476                QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
2477                kfree_skb(skb);
2478                return;
2479        }
2480        fr_eof(fp) = crc_eof.fcoe_eof;
2481        fr_crc(fp) = crc_eof.fcoe_crc32;
2482        if (pskb_trim(skb, fr_len)) {
2483                QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
2484                kfree_skb(skb);
2485                return;
2486        }
2487
2488        fh = fc_frame_header_get(fp);
2489
2490        /*
2491         * Invalid frame filters.
2492         */
2493
2494        if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2495            fh->fh_type == FC_TYPE_FCP) {
                /* Drop FCP data; we don't handle it in the L2 path */
2497                kfree_skb(skb);
2498                return;
2499        }
2500        if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
2501            fh->fh_type == FC_TYPE_ELS) {
2502                switch (fc_frame_payload_op(fp)) {
2503                case ELS_LOGO:
2504                        if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
2505                                /* drop non-FIP LOGO */
2506                                kfree_skb(skb);
2507                                return;
2508                        }
2509                        break;
2510                }
2511        }
2512
2513        if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
2514                /* Drop incoming ABTS */
2515                kfree_skb(skb);
2516                return;
2517        }
2518
2519        if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
2520                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2521                    "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
2522                kfree_skb(skb);
2523                return;
2524        }
2525
2526        if (qedf->ctlr.state) {
2527                if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
2528                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2529                            "Wrong source address: mac:%pM dest_addr:%pM.\n",
2530                            mac, qedf->ctlr.dest_addr);
2531                        kfree_skb(skb);
2532                        return;
2533                }
2534        }
2535
2536        vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
2537
        /*
         * If the destination ID in the frame header matches neither the
         * lport nor any NPIV port, the frame is not addressed to our port,
         * so simply drop it.
         */
2543        if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
2544                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2545                          "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
2546                          lport->port_id, ntoh24(fh->fh_d_id));
2547                kfree_skb(skb);
2548                return;
2549        }
2550
2551        f_ctl = ntoh24(fh->fh_f_ctl);
2552        if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
2553            (f_ctl & FC_FC_EX_CTX)) {
2554                /* Drop incoming ABTS response that has both SEQ/EX CTX set */
2555                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2556                          "Dropping ABTS response as both SEQ/EX CTX set.\n");
2557                kfree_skb(skb);
2558                return;
2559        }
2560
2561        /*
2562         * If a connection is uploading, drop incoming FCoE frames as there
2563         * is a small window where we could try to return a frame while libfc
2564         * is trying to clean things up.
2565         */
2566
2567        /* Get fcport associated with d_id if it exists */
2568        fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2569
2570        if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
2571            &fcport->flags)) {
2572                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2573                    "Connection uploading, dropping fp=%p.\n", fp);
2574                kfree_skb(skb);
2575                return;
2576        }
2577
2578        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2579            "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
2580            ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2581            fh->fh_type);
2582        if (qedf_dump_frames)
2583                print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
2584                    1, skb->data, skb->len, false);
2585        fc_exch_recv(lport, fp);
2586}
2587
2588static void qedf_ll2_process_skb(struct work_struct *work)
2589{
2590        struct qedf_skb_work *skb_work =
2591            container_of(work, struct qedf_skb_work, work);
2592        struct qedf_ctx *qedf = skb_work->qedf;
2593        struct sk_buff *skb = skb_work->skb;
2594        struct ethhdr *eh;
2595
2596        if (!qedf) {
2597                QEDF_ERR(NULL, "qedf is NULL\n");
2598                goto err_out;
2599        }
2600
2601        eh = (struct ethhdr *)skb->data;
2602
2603        /* Undo VLAN encapsulation */
2604        if (eh->h_proto == htons(ETH_P_8021Q)) {
2605                memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
2606                eh = skb_pull(skb, VLAN_HLEN);
2607                skb_reset_mac_header(skb);
2608        }
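        /*
         * The memmove above shifts the 12 bytes of destination and source
         * MAC addresses forward by VLAN_HLEN (4 bytes), overwriting the
         * 802.1Q tag, so the skb_pull() leaves an untagged Ethernet
         * header in place.
         */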
2609
        /*
         * Process either a FIP frame or an FCoE frame based on the
         * protocol value.  If it is neither, just drop the frame.
         */
2615        if (eh->h_proto == htons(ETH_P_FIP)) {
2616                qedf_fip_recv(qedf, skb);
2617                goto out;
2618        } else if (eh->h_proto == htons(ETH_P_FCOE)) {
2619                __skb_pull(skb, ETH_HLEN);
2620                qedf_recv_frame(qedf, skb);
2621                goto out;
2622        } else
2623                goto err_out;
2624
2625err_out:
2626        kfree_skb(skb);
2627out:
2628        kfree(skb_work);
2629        return;
2630}
2631
2632static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
2633        u32 arg1, u32 arg2)
2634{
2635        struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2636        struct qedf_skb_work *skb_work;
2637
2638        if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
2639                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2640                          "Dropping frame as link state is down.\n");
2641                kfree_skb(skb);
2642                return 0;
2643        }
2644
2645        skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
2646        if (!skb_work) {
2647                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2648                           "dropping frame.\n");
2649                kfree_skb(skb);
2650                return 0;
2651        }
2652
2653        INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
2654        skb_work->skb = skb;
2655        skb_work->qedf = qedf;
2656        queue_work(qedf->ll2_recv_wq, &skb_work->work);
2657
2658        return 0;
2659}
2660
2661static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
2662        .rx_cb = qedf_ll2_rx,
2663        .tx_cb = NULL,
2664};
2665
2666/* Main thread to process I/O completions */
2667void qedf_fp_io_handler(struct work_struct *work)
2668{
2669        struct qedf_io_work *io_work =
2670            container_of(work, struct qedf_io_work, work);
2671        u32 comp_type;
2672
        /*
         * The deferred part of an unsolicited CQE sends the
         * frame to libfc.
         */
2677        comp_type = (io_work->cqe.cqe_data >>
2678            FCOE_CQE_CQE_TYPE_SHIFT) &
2679            FCOE_CQE_CQE_TYPE_MASK;
2680        if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
2681            io_work->fp)
2682                fc_exch_recv(io_work->qedf->lport, io_work->fp);
2683        else
2684                qedf_process_cqe(io_work->qedf, &io_work->cqe);
2685
2686        kfree(io_work);
2687}
2688
2689static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2690        struct qed_sb_info *sb_info, u16 sb_id)
2691{
2692        struct status_block *sb_virt;
2693        dma_addr_t sb_phys;
2694        int ret;
2695
2696        sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2697            sizeof(struct status_block), &sb_phys, GFP_KERNEL);
2698
2699        if (!sb_virt) {
2700                QEDF_ERR(&qedf->dbg_ctx,
2701                         "Status block allocation failed for id = %d.\n",
2702                         sb_id);
2703                return -ENOMEM;
2704        }
2705
2706        ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2707            sb_id, QED_SB_TYPE_STORAGE);
2708
2709        if (ret) {
2710                QEDF_ERR(&qedf->dbg_ctx,
2711                         "Status block initialization failed (0x%x) for id = %d.\n",
2712                         ret, sb_id);
2713                return ret;
2714        }
2715
2716        return 0;
2717}
2718
2719static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2720{
2721        if (sb_info->sb_virt)
2722                dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2723                    (void *)sb_info->sb_virt, sb_info->sb_phys);
2724}
2725
2726static void qedf_destroy_sb(struct qedf_ctx *qedf)
2727{
2728        int id;
2729        struct qedf_fastpath *fp = NULL;
2730
2731        for (id = 0; id < qedf->num_queues; id++) {
2732                fp = &(qedf->fp_array[id]);
2733                if (fp->sb_id == QEDF_SB_ID_NULL)
2734                        break;
2735                qedf_free_sb(qedf, fp->sb_info);
2736                kfree(fp->sb_info);
2737        }
2738        kfree(qedf->fp_array);
2739}
2740
2741static int qedf_prepare_sb(struct qedf_ctx *qedf)
2742{
2743        int id;
2744        struct qedf_fastpath *fp;
2745        int ret;
2746
2747        qedf->fp_array =
2748            kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2749                GFP_KERNEL);
2750
2751        if (!qedf->fp_array) {
2752                QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2753                          "failed.\n");
2754                return -ENOMEM;
2755        }
2756
2757        for (id = 0; id < qedf->num_queues; id++) {
2758                fp = &(qedf->fp_array[id]);
2759                fp->sb_id = QEDF_SB_ID_NULL;
2760                fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2761                if (!fp->sb_info) {
2762                        QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2763                                  "allocation failed.\n");
2764                        goto err;
2765                }
2766                ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2767                if (ret) {
2768                        QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2769                                  "initialization failed.\n");
2770                        goto err;
2771                }
2772                fp->sb_id = id;
2773                fp->qedf = qedf;
2774                fp->cq_num_entries =
2775                    qedf->global_queues[id]->cq_mem_size /
2776                    sizeof(struct fcoe_cqe);
2777        }
        return 0;

err:
        return -ENOMEM;
}
2781
2782void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2783{
2784        u16 xid;
2785        struct qedf_ioreq *io_req;
2786        struct qedf_rport *fcport;
2787        u32 comp_type;
2788
2789        comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2790            FCOE_CQE_CQE_TYPE_MASK;
2791
2792        xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2793        io_req = &qedf->cmd_mgr->cmds[xid];
2794
2795        /* Completion not for a valid I/O anymore so just return */
2796        if (!io_req) {
2797                QEDF_ERR(&qedf->dbg_ctx,
2798                         "io_req is NULL for xid=0x%x.\n", xid);
2799                return;
2800        }
2801
2802        fcport = io_req->fcport;
2803
2804        if (fcport == NULL) {
2805                QEDF_ERR(&qedf->dbg_ctx,
2806                         "fcport is NULL for xid=0x%x io_req=%p.\n",
2807                         xid, io_req);
2808                return;
2809        }
2810
2811        /*
2812         * Check that fcport is offloaded.  If it isn't then the spinlock
2813         * isn't valid and shouldn't be taken. We should just return.
2814         */
2815        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2816                QEDF_ERR(&qedf->dbg_ctx,
2817                         "Session not offloaded yet, fcport = %p.\n", fcport);
2818                return;
2819        }
2822        switch (comp_type) {
2823        case FCOE_GOOD_COMPLETION_CQE_TYPE:
2824                atomic_inc(&fcport->free_sqes);
2825                switch (io_req->cmd_type) {
2826                case QEDF_SCSI_CMD:
2827                        qedf_scsi_completion(qedf, cqe, io_req);
2828                        break;
2829                case QEDF_ELS:
2830                        qedf_process_els_compl(qedf, cqe, io_req);
2831                        break;
2832                case QEDF_TASK_MGMT_CMD:
2833                        qedf_process_tmf_compl(qedf, cqe, io_req);
2834                        break;
2835                case QEDF_SEQ_CLEANUP:
2836                        qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2837                        break;
2838                }
2839                break;
2840        case FCOE_ERROR_DETECTION_CQE_TYPE:
2841                atomic_inc(&fcport->free_sqes);
2842                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2843                    "Error detect CQE.\n");
2844                qedf_process_error_detect(qedf, cqe, io_req);
2845                break;
2846        case FCOE_EXCH_CLEANUP_CQE_TYPE:
2847                atomic_inc(&fcport->free_sqes);
2848                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2849                    "Cleanup CQE.\n");
2850                qedf_process_cleanup_compl(qedf, cqe, io_req);
2851                break;
2852        case FCOE_ABTS_CQE_TYPE:
2853                atomic_inc(&fcport->free_sqes);
2854                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2855                    "Abort CQE.\n");
2856                qedf_process_abts_compl(qedf, cqe, io_req);
2857                break;
2858        case FCOE_DUMMY_CQE_TYPE:
2859                atomic_inc(&fcport->free_sqes);
2860                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2861                    "Dummy CQE.\n");
2862                break;
2863        case FCOE_LOCAL_COMP_CQE_TYPE:
2864                atomic_inc(&fcport->free_sqes);
2865                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2866                    "Local completion CQE.\n");
2867                break;
2868        case FCOE_WARNING_CQE_TYPE:
2869                atomic_inc(&fcport->free_sqes);
2870                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2871                    "Warning CQE.\n");
2872                qedf_process_warning_compl(qedf, cqe, io_req);
2873                break;
2874        case MAX_FCOE_CQE_TYPE:
2875                atomic_inc(&fcport->free_sqes);
2876                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2877                    "Max FCoE CQE.\n");
2878                break;
2879        default:
2880                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2881                    "Default CQE.\n");
2882                break;
2883        }
2884}
2885
2886static void qedf_free_bdq(struct qedf_ctx *qedf)
2887{
2888        int i;
2889
2890        if (qedf->bdq_pbl_list)
2891                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2892                    qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2893
2894        if (qedf->bdq_pbl)
2895                dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2896                    qedf->bdq_pbl, qedf->bdq_pbl_dma);
2897
2898        for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2899                if (qedf->bdq[i].buf_addr) {
2900                        dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2901                            qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2902                }
2903        }
2904}
2905
2906static void qedf_free_global_queues(struct qedf_ctx *qedf)
2907{
2908        int i;
2909        struct global_queue **gl = qedf->global_queues;
2910
2911        for (i = 0; i < qedf->num_queues; i++) {
2912                if (!gl[i])
2913                        continue;
2914
2915                if (gl[i]->cq)
2916                        dma_free_coherent(&qedf->pdev->dev,
2917                            gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
2918                if (gl[i]->cq_pbl)
2919                        dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2920                            gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
2921
2922                kfree(gl[i]);
2923        }
2924
2925        qedf_free_bdq(qedf);
2926}
2927
2928static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2929{
2930        int i;
2931        struct scsi_bd *pbl;
2932        u64 *list;
2933        dma_addr_t page;
2934
2935        /* Alloc dma memory for BDQ buffers */
2936        for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2937                qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2938                    QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2939                if (!qedf->bdq[i].buf_addr) {
2940                        QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2941                            "buffer %d.\n", i);
2942                        return -ENOMEM;
2943                }
2944        }
2945
2946        /* Alloc dma memory for BDQ page buffer list */
2947        qedf->bdq_pbl_mem_size =
2948            QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
2949        qedf->bdq_pbl_mem_size =
2950            ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2951
2952        qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2953            qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2954        if (!qedf->bdq_pbl) {
2955                QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2956                return -ENOMEM;
2957        }
2958
2959        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2960                  "BDQ PBL addr=0x%p dma=%pad\n",
2961                  qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2962
2963        /*
2964         * Populate BDQ PBL with physical and virtual address of individual
2965         * BDQ buffers
2966         */
2967        pbl = (struct scsi_bd *)qedf->bdq_pbl;
2968        for (i = 0; i < QEDF_BDQ_SIZE; i++) {
2969                pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2970                pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
2971                pbl->opaque.fcoe_opaque.hi = 0;
2972                /* Opaque lo data is an index into the BDQ array */
2973                pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
2974                pbl++;
2975        }
2976
2977        /* Allocate list of PBL pages */
2978        qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2979                                                QEDF_PAGE_SIZE,
2980                                                &qedf->bdq_pbl_list_dma,
2981                                                GFP_KERNEL);
2982        if (!qedf->bdq_pbl_list) {
2983                QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
2984                return -ENOMEM;
2985        }
2986
2987        /*
2988         * Now populate PBL list with pages that contain pointers to the
2989         * individual buffers.
2990         */
2991        qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
2992            QEDF_PAGE_SIZE;
2993        list = (u64 *)qedf->bdq_pbl_list;
2994        page = qedf->bdq_pbl_dma;
2995        for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
2996                *list = page;
2997                list++;
2998                page += QEDF_PAGE_SIZE;
2999        }
3000
3001        return 0;
3002}
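
/*
 * A sketch of the resulting BDQ layout, as derived from the allocations
 * above: a single PBL-list page points at the page(s) of the PBL, and
 * each scsi_bd entry in the PBL points at one receive buffer.
 *
 *   bdq_pbl_list (1 page)     bdq_pbl (scsi_bd entries)      bdq[i] buffers
 *   [ PBL page 0 dma ] --->   [ addr+opaque of bdq[0] ] ---> [ buffer 0 ]
 *   [ PBL page 1 dma ]        [ addr+opaque of bdq[1] ] ---> [ buffer 1 ]
 *   ...                       ...                            ...
 */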
3003
3004static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
3005{
3006        u32 *list;
3007        int i;
3008        int status;
3009        u32 *pbl;
3010        dma_addr_t page;
3011        int num_pages;
3012
3013        /* Allocate and map CQs, RQs */
3014        /*
3015         * Number of global queues (CQ / RQ). This should
3016         * be <= number of available MSIX vectors for the PF
3017         */
3018        if (!qedf->num_queues) {
3019                QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
3020                return -ENOMEM;
3021        }
3022
3023        /*
3024         * Make sure we allocated the PBL that will contain the physical
3025         * addresses of our queues
3026         */
3027        if (!qedf->p_cpuq) {
3028                status = -EINVAL;
3029                QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
3030                goto mem_alloc_failure;
3031        }
3032
3033        qedf->global_queues = kcalloc(qedf->num_queues,
3034            sizeof(struct global_queue *), GFP_KERNEL);
3035        if (!qedf->global_queues) {
3036                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
3037                          "queues array ptr memory\n");
3038                return -ENOMEM;
3039        }
3040        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3041                   "qedf->global_queues=%p.\n", qedf->global_queues);
3042
3043        /* Allocate DMA coherent buffers for BDQ */
3044        status = qedf_alloc_bdq(qedf);
3045        if (status) {
3046                QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
3047                goto mem_alloc_failure;
3048        }
3049
3050        /* Allocate a CQ and an associated PBL for each MSI-X vector */
3051        for (i = 0; i < qedf->num_queues; i++) {
3052                qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
3053                    GFP_KERNEL);
3054                if (!qedf->global_queues[i]) {
3055                        QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
3056                                   "global queue %d.\n", i);
3057                        status = -ENOMEM;
3058                        goto mem_alloc_failure;
3059                }
3060
3061                qedf->global_queues[i]->cq_mem_size =
3062                    FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3063                qedf->global_queues[i]->cq_mem_size =
3064                    ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
3065
3066                qedf->global_queues[i]->cq_pbl_size =
3067                    (qedf->global_queues[i]->cq_mem_size /
3068                    QEDF_PAGE_SIZE) * sizeof(void *);
3069                qedf->global_queues[i]->cq_pbl_size =
3070                    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
3071
3072                qedf->global_queues[i]->cq =
3073                    dma_alloc_coherent(&qedf->pdev->dev,
3074                                       qedf->global_queues[i]->cq_mem_size,
3075                                       &qedf->global_queues[i]->cq_dma,
3076                                       GFP_KERNEL);
3077
3078                if (!qedf->global_queues[i]->cq) {
3079                        QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
3080                        status = -ENOMEM;
3081                        goto mem_alloc_failure;
3082                }
3083
3084                qedf->global_queues[i]->cq_pbl =
3085                    dma_alloc_coherent(&qedf->pdev->dev,
3086                                       qedf->global_queues[i]->cq_pbl_size,
3087                                       &qedf->global_queues[i]->cq_pbl_dma,
3088                                       GFP_KERNEL);
3089
3090                if (!qedf->global_queues[i]->cq_pbl) {
3091                        QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
3092                        status = -ENOMEM;
3093                        goto mem_alloc_failure;
3094                }
3095
3096                /* Create PBL */
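                /*
                 * Each PBL entry is a 64-bit CQ page address stored as
                 * two u32 words, low word first, one entry per page of
                 * the CQ.
                 */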
3097                num_pages = qedf->global_queues[i]->cq_mem_size /
3098                    QEDF_PAGE_SIZE;
3099                page = qedf->global_queues[i]->cq_dma;
3100                pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
3101
3102                while (num_pages--) {
3103                        *pbl = U64_LO(page);
3104                        pbl++;
3105                        *pbl = U64_HI(page);
3106                        pbl++;
3107                        page += QEDF_PAGE_SIZE;
3108                }
3109                /* Set the initial consumer index for cq */
3110                qedf->global_queues[i]->cq_cons_idx = 0;
3111        }
3112
3113        list = (u32 *)qedf->p_cpuq;
3114
3115        /*
3116         * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
3117         * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
3118         * to the physical address which contains an array of pointers to
3119         * the physical addresses of the specific queue pages.
3120         */
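        /*
         * For example, with num_queues == 2 the loop below produces:
         *
         *   { CQ0_pbl_lo, CQ0_pbl_hi, 0, 0, CQ1_pbl_lo, CQ1_pbl_hi, 0, 0 }
         *
         * (the RQ PBL pointers are simply written as zero).
         */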
3121        for (i = 0; i < qedf->num_queues; i++) {
3122                *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
3123                list++;
3124                *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
3125                list++;
3126                *list = U64_LO(0);
3127                list++;
3128                *list = U64_HI(0);
3129                list++;
3130        }
3131
3132        return 0;
3133
3134mem_alloc_failure:
3135        qedf_free_global_queues(qedf);
3136        return status;
3137}
3138
3139static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
3140{
3141        u8 sq_num_pbl_pages;
3142        u32 sq_mem_size;
3143        u32 cq_mem_size;
3144        u32 cq_num_entries;
3145        int rval;
3146
3147        /*
3148         * The number of completion queues/fastpath interrupts/status blocks
3149         * we allocate is the minimum of:
3150         *
3151         * Number of CPUs
3152         * Number allocated by qed for our PCI function
3153         */
3154        qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
3155
3156        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
3157                   qedf->num_queues);
3158
3159        qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
3160            qedf->num_queues * sizeof(struct qedf_glbl_q_params),
3161            &qedf->hw_p_cpuq, GFP_KERNEL);
3162
3163        if (!qedf->p_cpuq) {
3164                QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
3165                return -ENOMEM;
3166        }
3167
3168        rval = qedf_alloc_global_queues(qedf);
3169        if (rval) {
3170                QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
3171                          "failed.\n");
3172                return rval;
3173        }
3174
3175        /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
3176        sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
3177        sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
3178        sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
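        /* sq_mem_size is page-aligned just above, so this division is exact. */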
3179
3180        /* Calculate CQ num entries */
3181        cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
3182        cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
3183        cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
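        /*
         * Since cq_mem_size was rounded up to a page multiple above,
         * cq_num_entries may exceed FCOE_PARAMS_CQ_NUM_ENTRIES; the extra
         * entries simply occupy the page-alignment padding.
         */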
3184
3185        memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
3186
3187        /* Setup the value for fcoe PF */
3188        qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
3189        qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
3190        qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
3191            (u64)qedf->hw_p_cpuq;
3192        qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
3193
3194        qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
3195
3196        qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
3197        qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
3198
3199        /* log_page_size: 12 for 4KB pages */
3200        qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
3201
3202        qedf->pf_params.fcoe_pf_params.mtu = 9000;
3203        qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
3204        qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
3205
3206        /* BDQ address and size */
3207        qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
3208            qedf->bdq_pbl_list_dma;
3209        qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
3210            qedf->bdq_pbl_list_num_entries;
3211        qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
3212
3213        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3214            "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
3215            qedf->bdq_pbl_list,
3216            qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
3217            qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
3218
3219        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3220            "cq_num_entries=%d.\n",
3221            qedf->pf_params.fcoe_pf_params.cq_num_entries);
3222
3223        return 0;
3224}
3225
3226/* Free DMA coherent memory for array of queue pointers we pass to qed */
3227static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
3228{
3229        size_t size = 0;
3230
3231        if (qedf->p_cpuq) {
3232                size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
3233                dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
3234                    qedf->hw_p_cpuq);
3235        }
3236
3237        qedf_free_global_queues(qedf);
3238
3239        kfree(qedf->global_queues);
3240}
3241
3242/*
3243 * PCI driver functions
3244 */
3245
3246static const struct pci_device_id qedf_pci_tbl[] = {
3247        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
3248        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
3249        {0}
3250};
3251MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
3252
3253static struct pci_driver qedf_pci_driver = {
3254        .name = QEDF_MODULE_NAME,
3255        .id_table = qedf_pci_tbl,
3256        .probe = qedf_probe,
3257        .remove = qedf_remove,
3258        .shutdown = qedf_shutdown,
3259};
3260
3261static int __qedf_probe(struct pci_dev *pdev, int mode)
3262{
3263        int rc = -EINVAL;
3264        struct fc_lport *lport;
3265        struct qedf_ctx *qedf = NULL;
3266        struct Scsi_Host *host;
3267        bool is_vf = false;
3268        struct qed_ll2_params params;
3269        char host_buf[20];
3270        struct qed_link_params link_params;
3271        int status;
3272        void *task_start, *task_end;
3273        struct qed_slowpath_params slowpath_params;
3274        struct qed_probe_params qed_params;
3275        u16 retry_cnt = 10;
3276
3277        /*
3278         * When doing error recovery we didn't reap the lport so don't try
3279         * to reallocate it.
3280         */
3281retry_probe:
3282        if (mode == QEDF_MODE_RECOVERY)
3283                msleep(2000);
3284
3285        if (mode != QEDF_MODE_RECOVERY) {
3286                lport = libfc_host_alloc(&qedf_host_template,
3287                    sizeof(struct qedf_ctx));
3288
3289                if (!lport) {
3290                        QEDF_ERR(NULL, "Could not allocate lport.\n");
3291                        rc = -ENOMEM;
3292                        goto err0;
3293                }
3294
3295                fc_disc_init(lport);
3296
3297                /* Initialize qedf_ctx */
3298                qedf = lport_priv(lport);
3299                set_bit(QEDF_PROBING, &qedf->flags);
3300                qedf->lport = lport;
3301                qedf->ctlr.lp = lport;
3302                qedf->pdev = pdev;
3303                qedf->dbg_ctx.pdev = pdev;
3304                qedf->dbg_ctx.host_no = lport->host->host_no;
3305                spin_lock_init(&qedf->hba_lock);
3306                INIT_LIST_HEAD(&qedf->fcports);
3307                qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3308                atomic_set(&qedf->num_offloads, 0);
3309                qedf->stop_io_on_error = false;
3310                pci_set_drvdata(pdev, qedf);
3311                init_completion(&qedf->fipvlan_compl);
3312                mutex_init(&qedf->stats_mutex);
3313                mutex_init(&qedf->flush_mutex);
3314                qedf->flogi_pending = 0;
3315
3316                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3317                   "QLogic FastLinQ FCoE Module qedf %s, "
3318                   "FW %d.%d.%d.%d\n", QEDF_VERSION,
3319                   FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
3320                   FW_ENGINEERING_VERSION);
3321        } else {
3322                /* Init pointers during recovery */
3323                qedf = pci_get_drvdata(pdev);
3324                set_bit(QEDF_PROBING, &qedf->flags);
3325                lport = qedf->lport;
3326        }
3327
3328        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
3329
3330        host = lport->host;
3331
3332        /* Allocate mempool for qedf_io_work structs */
3333        qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3334            qedf_io_work_cache);
3335        if (qedf->io_mempool == NULL) {
3336                QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3337                goto err1;
3338        }
3339        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3340            qedf->io_mempool);
3341
3342        snprintf(host_buf, sizeof(host_buf), "qedf_%u_link",
3343            qedf->lport->host->host_no);
3344        qedf->link_update_wq = create_workqueue(host_buf);
3345        INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3346        INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
3347        INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
3348        INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
3349        qedf->fipvlan_retries = qedf_fipvlan_retries;
3350        /* Set a default prio in case DCBX doesn't converge */
3351        if (qedf_default_prio > -1) {
3352                /*
3353                 * This is the case where we pass a modparam in so we want to
3354                 * honor it even if dcbx doesn't converge.
3355                 */
3356                qedf->prio = qedf_default_prio;
3357        } else
3358                qedf->prio = QEDF_DEFAULT_PRIO;
3359
3360        /*
3361         * Common probe. Takes care of basic hardware init and pci_*
3362         * functions.
3363         */
3364        memset(&qed_params, 0, sizeof(qed_params));
3365        qed_params.protocol = QED_PROTOCOL_FCOE;
3366        qed_params.dp_module = qedf_dp_module;
3367        qed_params.dp_level = qedf_dp_level;
3368        qed_params.is_vf = is_vf;
3369        qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3370        if (!qedf->cdev) {
3371                if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
3372                        QEDF_ERR(&qedf->dbg_ctx,
3373                                "Retry %d initialize hardware\n", retry_cnt);
3374                        retry_cnt--;
3375                        goto retry_probe;
3376                }
3377                QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
3378                rc = -ENODEV;
3379                goto err1;
3380        }
3381
3382        /* Learn information crucial for qedf to progress */
3383        rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3384        if (rc) {
3385                QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3386                goto err1;
3387        }
3388
3389        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
3390                  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
3391                  qedf->dev_info.common.num_hwfns,
3392                  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
3393
3394        /* queue allocation code should come here
3395         * order should be
3396         *      slowpath_start
3397         *      status block allocation
3398         *      interrupt registration (to get min number of queues)
3399         *      set_fcoe_pf_param
3400         *      qed_sp_fcoe_func_start
3401         */
3402        rc = qedf_set_fcoe_pf_param(qedf);
3403        if (rc) {
3404                QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3405                goto err2;
3406        }
3407        qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3408
3409        /* Learn information crucial for qedf to progress */
3410        rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3411        if (rc) {
3412                QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
3413                goto err2;
3414        }
3415
3416        if (mode != QEDF_MODE_RECOVERY) {
3417                qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
3418                if (IS_ERR(qedf->devlink)) {
3419                        QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
3420                        rc = PTR_ERR(qedf->devlink);
3421                        qedf->devlink = NULL;
3422                        goto err2;
3423                }
3424        }
3425
3426        /* Record BDQ producer doorbell addresses */
3427        qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3428        qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3429        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3430            "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3431            qedf->bdq_secondary_prod);
3432
3433        qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3434
3435        rc = qedf_prepare_sb(qedf);
3436        if (rc) {
3437                QEDF_ERR(&(qedf->dbg_ctx),
3438                         "Cannot prepare status blocks.\n");
3439                goto err2;
3440        }
3441
3442        /* Start the Slowpath-process */
3443        slowpath_params.int_mode = QED_INT_MODE_MSIX;
3444        slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
3445        slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
3446        slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
3447        slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
3448        strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
3449        rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3450        if (rc) {
3451                QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3452                goto err2;
3453        }
3454
3455        /*
3456         * update_pf_params needs to be called before and after slowpath
3457         * start
3458         */
3459        qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3460
3461        /* Setup interrupts */
3462        rc = qedf_setup_int(qedf);
3463        if (rc) {
3464                QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
3465                goto err3;
3466        }
3467
3468        rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3469        if (rc) {
3470                QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3471                goto err4;
3472        }
3473        task_start = qedf_get_task_mem(&qedf->tasks, 0);
3474        task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3475        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3476                   "end=%p block_size=%u.\n", task_start, task_end,
3477                   qedf->tasks.size);
3478
3479        /*
3480         * We need to write the number of BDs in the BDQ we've preallocated so
3481         * the f/w will do a prefetch and we'll get an unsolicited CQE when a
3482         * packet arrives.
3483         */
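        /*
         * The readw() after each doorbell write below presumably serves
         * to flush the posted write before the second doorbell is rung.
         */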
3484        qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3485        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3486            "Writing %d to primary and secondary BDQ doorbell registers.\n",
3487            qedf->bdq_prod_idx);
3488        writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3489        readw(qedf->bdq_primary_prod);
3490        writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3491        readw(qedf->bdq_secondary_prod);
3492
3493        qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3494
3495        /* Now that the dev_info struct has been filled in, set the MAC
3496         * address
3497         */
3498        ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3499        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3500                   qedf->mac);
3501
3502        /*
3503         * Set the WWNN and WWPN in the following way:
3504         *
3505         * If the info we get from qed is non-zero then use that to set the
3506         * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
3507         * on the MAC address.
3508         */
3509        if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3510                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3511                    "Setting WWPN and WWNN from qed dev_info.\n");
3512                qedf->wwnn = qedf->dev_info.wwnn;
3513                qedf->wwpn = qedf->dev_info.wwpn;
3514        } else {
3515                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3516                    "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
3517                qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3518                qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3519        }
3520        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,  "WWNN=%016llx "
3521                   "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3522
3523        snprintf(host_buf, sizeof(host_buf), "host_%d", host->host_no);
3524        qed_ops->common->set_name(qedf->cdev, host_buf);
3525
3526        /* Allocate cmd mgr */
3527        qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3528        if (!qedf->cmd_mgr) {
3529                QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3530                rc = -ENOMEM;
3531                goto err5;
3532        }
3533
3534        if (mode != QEDF_MODE_RECOVERY) {
3535                host->transportt = qedf_fc_transport_template;
3536                host->max_lun = qedf_max_lun;
3537                host->max_cmd_len = QEDF_MAX_CDB_LEN;
3538                host->max_id = QEDF_MAX_SESSIONS;
3539                host->can_queue = FCOE_PARAMS_NUM_TASKS;
3540                rc = scsi_add_host(host, &pdev->dev);
3541                if (rc) {
3542                        QEDF_WARN(&qedf->dbg_ctx,
3543                                  "Error adding Scsi_Host rc=0x%x.\n", rc);
3544                        goto err6;
3545                }
3546        }
3547
3548        memset(&params, 0, sizeof(params));
3549        params.mtu = QEDF_LL2_BUF_SIZE;
3550        ether_addr_copy(params.ll2_mac_address, qedf->mac);
3551
3552        /* Start LL2 processing thread */
3553        snprintf(host_buf, sizeof(host_buf), "qedf_%d_ll2", host->host_no);
3554        qedf->ll2_recv_wq =
3555                create_workqueue(host_buf);
3556        if (!qedf->ll2_recv_wq) {
3557                QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
3558                rc = -ENOMEM;
3559                goto err7;
3560        }
3561
3562#ifdef CONFIG_DEBUG_FS
3563        qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3564                            qedf_dbg_fops);
3565#endif
3566
3567        /* Start LL2 */
3568        qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3569        rc = qed_ops->ll2->start(qedf->cdev, &params);
3570        if (rc) {
3571                QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3572                goto err7;
3573        }
3574        set_bit(QEDF_LL2_STARTED, &qedf->flags);
3575
3576        /* Clear the initial FIP/FCoE VLAN id (0 means not yet discovered) */
3577        qedf->vlan_id = 0;
3578
3579        /*
3580         * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3581         * they were not reaped during the unload process.
3582         */
3583        if (mode != QEDF_MODE_RECOVERY) {
3584                /* Set up embedded fcoe controller */
3585                qedf_fcoe_ctlr_setup(qedf);
3586
3587                /* Setup lport */
3588                rc = qedf_lport_setup(qedf);
3589                if (rc) {
3590                        QEDF_ERR(&(qedf->dbg_ctx),
3591                            "qedf_lport_setup failed.\n");
3592                        goto err7;
3593                }
3594        }
3595
3596        snprintf(host_buf, sizeof(host_buf), "qedf_%u_timer", qedf->lport->host->host_no);
3597        qedf->timer_work_queue =
3598                create_workqueue(host_buf);
3599        if (!qedf->timer_work_queue) {
3600                QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3601                          "workqueue.\n");
3602                rc = -ENOMEM;
3603                goto err7;
3604        }
3605
3606        /* DPC workqueue is not reaped during recovery unload */
3607        if (mode != QEDF_MODE_RECOVERY) {
3608                snprintf(host_buf, sizeof(host_buf), "qedf_%u_dpc",
3609                    qedf->lport->host->host_no);
3610                qedf->dpc_wq = create_workqueue(host_buf);
3611        }
3612        INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3613
3614        /*
3615         * GRC dump and sysfs parameters are not reaped during the recovery
3616         * unload process.
3617         */
3618        if (mode != QEDF_MODE_RECOVERY) {
3619                qedf->grcdump_size =
3620                    qed_ops->common->dbg_all_data_size(qedf->cdev);
3621                if (qedf->grcdump_size) {
3622                        rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3623                            qedf->grcdump_size);
3624                        if (rc) {
3625                                QEDF_ERR(&(qedf->dbg_ctx),
3626                                    "GRC Dump buffer alloc failed.\n");
3627                                qedf->grcdump = NULL;
3628                        }
3629
3630                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3631                            "grcdump: addr=%p, size=%u.\n",
3632                            qedf->grcdump, qedf->grcdump_size);
3633                }
3634                qedf_create_sysfs_ctx_attr(qedf);
3635
3636                /* Initialize I/O tracing for this adapter */
3637                spin_lock_init(&qedf->io_trace_lock);
3638                qedf->io_trace_idx = 0;
3639        }
3640
3641        init_completion(&qedf->flogi_compl);
3642
3643        status = qed_ops->common->update_drv_state(qedf->cdev, true);
3644        if (status)
3645                QEDF_ERR(&(qedf->dbg_ctx),
3646                        "Failed to send drv state to MFW.\n");
3647
3648        memset(&link_params, 0, sizeof(struct qed_link_params));
3649        link_params.link_up = true;
3650        status = qed_ops->common->set_link(qedf->cdev, &link_params);
3651        if (status)
3652                QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3653
3654        /* Start/restart discovery */
3655        if (mode == QEDF_MODE_RECOVERY)
3656                fcoe_ctlr_link_up(&qedf->ctlr);
3657        else
3658                fc_fabric_login(lport);
3659
3660        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3661
3662        clear_bit(QEDF_PROBING, &qedf->flags);
3663
3664        /* All good */
3665        return 0;
3666
3667err7:
3668        if (qedf->ll2_recv_wq)
3669                destroy_workqueue(qedf->ll2_recv_wq);
3670        fc_remove_host(qedf->lport->host);
3671        scsi_remove_host(qedf->lport->host);
3672#ifdef CONFIG_DEBUG_FS
3673        qedf_dbg_host_exit(&(qedf->dbg_ctx));
3674#endif
3675err6:
3676        qedf_cmd_mgr_free(qedf->cmd_mgr);
3677err5:
3678        qed_ops->stop(qedf->cdev);
3679err4:
3680        qedf_free_fcoe_pf_param(qedf);
3681        qedf_sync_free_irqs(qedf);
3682err3:
3683        qed_ops->common->slowpath_stop(qedf->cdev);
3684err2:
3685        qed_ops->common->remove(qedf->cdev);
3686err1:
3687        scsi_host_put(lport->host);
3688err0:
3689        if (qedf) {
3690                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3691
3692                clear_bit(QEDF_PROBING, &qedf->flags);
3693        }
3694        return rc;
3695}
3696
3697static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3698{
3699        return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3700}
3701
3702static void __qedf_remove(struct pci_dev *pdev, int mode)
3703{
3704        struct qedf_ctx *qedf;
3705        int rc;
3706
3707        if (!pdev) {
3708                QEDF_ERR(NULL, "pdev is NULL.\n");
3709                return;
3710        }
3711
3712        qedf = pci_get_drvdata(pdev);
3713
3714        /*
3715         * Prevent race where we're in board disable work and then try to
3716         * rmmod the module.
3717         */
3718        if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3719                QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3720                return;
3721        }
3722
3723        if (mode != QEDF_MODE_RECOVERY)
3724                set_bit(QEDF_UNLOADING, &qedf->flags);
3725
3726        /* Logoff the fabric to upload all connections */
3727        if (mode == QEDF_MODE_RECOVERY)
3728                fcoe_ctlr_link_down(&qedf->ctlr);
3729        else
3730                fc_fabric_logoff(qedf->lport);
3731
3732        if (!qedf_wait_for_upload(qedf))
3733                QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3734
3735#ifdef CONFIG_DEBUG_FS
3736        qedf_dbg_host_exit(&(qedf->dbg_ctx));
3737#endif
3738
3739        /* Stop any link update handling */
3740        cancel_delayed_work_sync(&qedf->link_update);
3741        destroy_workqueue(qedf->link_update_wq);
3742        qedf->link_update_wq = NULL;
3743
3744        if (qedf->timer_work_queue)
3745                destroy_workqueue(qedf->timer_work_queue);
3746
3747        /* Stop Light L2 */
3748        clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3749        qed_ops->ll2->stop(qedf->cdev);
3750        if (qedf->ll2_recv_wq)
3751                destroy_workqueue(qedf->ll2_recv_wq);
3752
3753        /* Stop fastpath */
3754        qedf_sync_free_irqs(qedf);
3755        qedf_destroy_sb(qedf);
3756
3757        /*
3758         * During recovery don't destroy OS constructs that represent the
3759         * physical port.
3760         */
3761        if (mode != QEDF_MODE_RECOVERY) {
3762                qedf_free_grc_dump_buf(&qedf->grcdump);
3763                qedf_remove_sysfs_ctx_attr(qedf);
3764
3765                /* Remove all SCSI/libfc/libfcoe structures */
3766                fcoe_ctlr_destroy(&qedf->ctlr);
3767                fc_lport_destroy(qedf->lport);
3768                fc_remove_host(qedf->lport->host);
3769                scsi_remove_host(qedf->lport->host);
3770        }
3771
3772        qedf_cmd_mgr_free(qedf->cmd_mgr);
3773
3774        if (mode != QEDF_MODE_RECOVERY) {
3775                fc_exch_mgr_free(qedf->lport);
3776                fc_lport_free_stats(qedf->lport);
3777
3778                /* Wait for all vports to be reaped */
3779                qedf_wait_for_vport_destroy(qedf);
3780        }
3781
3782        /*
3783         * Now that all connections have been uploaded we can stop the
3784         * rest of the qed operations
3785         */
3786        qed_ops->stop(qedf->cdev);
3787
3788        if (mode != QEDF_MODE_RECOVERY) {
3789                if (qedf->dpc_wq) {
3790                        /* Stop general DPC handling */
3791                        destroy_workqueue(qedf->dpc_wq);
3792                        qedf->dpc_wq = NULL;
3793                }
3794        }
3795
3796        /* Final shutdown for the board */
3797        qedf_free_fcoe_pf_param(qedf);
3798        if (mode != QEDF_MODE_RECOVERY) {
3799                qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3800                pci_set_drvdata(pdev, NULL);
3801        }
3802
3803        rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3804        if (rc)
3805                QEDF_ERR(&(qedf->dbg_ctx),
3806                        "Failed to send drv state to MFW.\n");
3807
3808        if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
3809                qed_ops->common->devlink_unregister(qedf->devlink);
3810                qedf->devlink = NULL;
3811        }
3812
3813        qed_ops->common->slowpath_stop(qedf->cdev);
3814        qed_ops->common->remove(qedf->cdev);
3815
3816        mempool_destroy(qedf->io_mempool);
3817
3818        /* Only reap the Scsi_host on a real removal */
3819        if (mode != QEDF_MODE_RECOVERY)
3820                scsi_host_put(qedf->lport->host);
3821}
3822
3823static void qedf_remove(struct pci_dev *pdev)
3824{
3825        /* Check to make sure this function wasn't already disabled */
3826        if (!atomic_read(&pdev->enable_cnt))
3827                return;
3828
3829        __qedf_remove(pdev, QEDF_MODE_NORMAL);
3830}
3831
3832void qedf_wq_grcdump(struct work_struct *work)
3833{
3834        struct qedf_ctx *qedf =
3835            container_of(work, struct qedf_ctx, grcdump_work.work);
3836
3837        QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3838        qedf_capture_grc_dump(qedf);
3839}
3840
3841void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
3842{
3843        struct qedf_ctx *qedf = dev;
3844
3845        QEDF_ERR(&(qedf->dbg_ctx),
3846                        "Hardware error handler scheduled, event=%d.\n",
3847                        err_type);
3848
3849        if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3850                QEDF_ERR(&(qedf->dbg_ctx),
3851                                "Already in recovery, not scheduling board disable work.\n");
3852                return;
3853        }
3854
3855        switch (err_type) {
3856        case QED_HW_ERR_FAN_FAIL:
3857                schedule_delayed_work(&qedf->board_disable_work, 0);
3858                break;
3859        case QED_HW_ERR_MFW_RESP_FAIL:
3860        case QED_HW_ERR_HW_ATTN:
3861        case QED_HW_ERR_DMAE_FAIL:
3862        case QED_HW_ERR_FW_ASSERT:
3863                /* Prevent HW attentions from being reasserted */
3864                qed_ops->common->attn_clr_enable(qedf->cdev, true);
3865                break;
3866        case QED_HW_ERR_RAMROD_FAIL:
3867                /* Prevent HW attentions from being reasserted */
3868                qed_ops->common->attn_clr_enable(qedf->cdev, true);
3869
3870                if (qedf_enable_recovery && qedf->devlink)
3871                        qed_ops->common->report_fatal_error(qedf->devlink,
3872                                err_type);
3873
3874                break;
3875        default:
3876                break;
3877        }
3878}
3879
3880/*
3881 * Protocol TLV handler
3882 */
3883void qedf_get_protocol_tlv_data(void *dev, void *data)
3884{
3885        struct qedf_ctx *qedf = dev;
3886        struct qed_mfw_tlv_fcoe *fcoe = data;
3887        struct fc_lport *lport;
3888        struct Scsi_Host *host;
3889        struct fc_host_attrs *fc_host;
3890        struct fc_host_statistics *hst;
3891
3892        if (!qedf) {
3893                QEDF_ERR(NULL, "qedf is null.\n");
3894                return;
3895        }
3896
3897        if (test_bit(QEDF_PROBING, &qedf->flags)) {
3898                QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3899                return;
3900        }
3901
3902        lport = qedf->lport;
3903        host = lport->host;
3904        fc_host = shost_to_fc_host(host);
3905
3906        /* Force a refresh of the fc_host stats including offload stats */
3907        hst = qedf_fc_get_host_stats(host);
3908
3909        fcoe->qos_pri_set = true;
3910        fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
3911
3912        fcoe->ra_tov_set = true;
3913        fcoe->ra_tov = lport->r_a_tov;
3914
3915        fcoe->ed_tov_set = true;
3916        fcoe->ed_tov = lport->e_d_tov;
3917
3918        fcoe->npiv_state_set = true;
3919        fcoe->npiv_state = 1; /* NPIV always enabled */
3920
3921        fcoe->num_npiv_ids_set = true;
3922        fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
3923
3924        /* Certain attributes we only want to set if we've selected an FCF */
3925        if (qedf->ctlr.sel_fcf) {
3926                fcoe->switch_name_set = true;
3927                u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3928        }
3929
3930        fcoe->port_state_set = true;
3931        /* For qedf we're either link down or fabric attach */
3932        if (lport->link_up)
3933                fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
3934        else
3935                fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
3936
3937        fcoe->link_failures_set = true;
3938        fcoe->link_failures = (u16)hst->link_failure_count;
3939
3940        fcoe->fcoe_txq_depth_set = true;
3941        fcoe->fcoe_rxq_depth_set = true;
3942        fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
3943        fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
3944
3945        fcoe->fcoe_rx_frames_set = true;
3946        fcoe->fcoe_rx_frames = hst->rx_frames;
3947
3948        fcoe->fcoe_tx_frames_set = true;
3949        fcoe->fcoe_tx_frames = hst->tx_frames;
3950
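        /*
         * The fc_host statistics track FCP payload in whole megabytes,
         * so the byte counts reported here are only accurate to 10^6.
         */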
3951        fcoe->fcoe_rx_bytes_set = true;
3952        fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
3953
3954        fcoe->fcoe_tx_bytes_set = true;
3955        fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
3956
3957        fcoe->crc_count_set = true;
3958        fcoe->crc_count = hst->invalid_crc_count;
3959
3960        fcoe->tx_abts_set = true;
3961        fcoe->tx_abts = hst->fcp_packet_aborts;
3962
3963        fcoe->tx_lun_rst_set = true;
3964        fcoe->tx_lun_rst = qedf->lun_resets;
3965
3966        fcoe->abort_task_sets_set = true;
3967        fcoe->abort_task_sets = qedf->packet_aborts;
3968
3969        fcoe->scsi_busy_set = true;
3970        fcoe->scsi_busy = qedf->busy;
3971
3972        fcoe->scsi_tsk_full_set = true;
3973        fcoe->scsi_tsk_full = qedf->task_set_fulls;
3974}
3975
3976/* Deferred work function to perform soft context reset on STAG change */
3977void qedf_stag_change_work(struct work_struct *work)
3978{
3979        struct qedf_ctx *qedf =
3980            container_of(work, struct qedf_ctx, stag_work.work);
3981
3982        printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.\n",
3983                        dev_name(&qedf->pdev->dev), __func__, __LINE__,
3984                        qedf->dbg_ctx.host_no);
3985        qedf_ctx_soft_reset(qedf->lport);
3986}
3987
3988static void qedf_shutdown(struct pci_dev *pdev)
3989{
3990        __qedf_remove(pdev, QEDF_MODE_NORMAL);
3991}
3992
3993/*
3994 * Recovery handler code
3995 */
3996static void qedf_schedule_recovery_handler(void *dev)
3997{
3998        struct qedf_ctx *qedf = dev;
3999
4000        QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
4001        schedule_delayed_work(&qedf->recovery_work, 0);
4002}
4003
4004static void qedf_recovery_handler(struct work_struct *work)
4005{
4006        struct qedf_ctx *qedf =
4007            container_of(work, struct qedf_ctx, recovery_work.work);
4008
4009        if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4010                return;
4011
4012        /*
4013         * Call common_ops->recovery_prolog to allow the MFW to quiesce
4014         * any PCI transactions.
4015         */
4016        qed_ops->common->recovery_prolog(qedf->cdev);
4017
4018        QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4019        __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4020        /*
4021         * Reset link and dcbx to down state since we will not get a link down
4022         * event from the MFW but calling __qedf_remove will essentially be a
4023         * link down event.
4024         */
4025        atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4026        atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4027        __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4028        clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4029        QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4030}
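
/*
 * Note that recovery is effectively an unload/reload cycle run in
 * QEDF_MODE_RECOVERY: __qedf_remove() tears down the fastpath but keeps
 * the lport/Scsi_Host constructs, and __qedf_probe() then rebuilds the
 * qed and firmware state underneath them.
 */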
4031
4032/* Generic TLV data callback */
4033void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
4034{
4035        struct qedf_ctx *qedf;
4036
4037        if (!dev) {
4038                QEDF_INFO(NULL, QEDF_LOG_EVT,
4039                          "dev is NULL so ignoring get_generic_tlv_data request.\n");
4040                return;
4041        }
4042        qedf = (struct qedf_ctx *)dev;
4043
4044        memset(data, 0, sizeof(struct qed_generic_tlvs));
4045        ether_addr_copy(data->mac[0], qedf->mac);
4046}
4047
4048/*
4049 * Module Init/Remove
4050 */
4051
4052static int __init qedf_init(void)
4053{
4054        int ret;
4055
4056        /* If debug=1 passed, set the default log mask */
4057        if (qedf_debug == QEDF_LOG_DEFAULT)
4058                qedf_debug = QEDF_DEFAULT_LOG_MASK;
4059
4060        /*
4061         * Check that default prio for FIP/FCoE traffic is between 0..7 if a
4062         * value has been set
4063         */
4064        if (qedf_default_prio > 7) {
4065                qedf_default_prio = QEDF_DEFAULT_PRIO;
4066                QEDF_ERR(NULL,
4067                    "FCoE/FIP priority out of range, resetting to %d.\n",
4068                    QEDF_DEFAULT_PRIO);
4069        }
4070
4071        /* Print driver banner */
4072        QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
4073                   QEDF_VERSION);
4074
4075        /* Create kmem_cache for qedf_io_work structs */
4076        qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
4077            sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
4078        if (qedf_io_work_cache == NULL) {
4079                QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
4080                goto err1;
4081        }
4082        QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
4083            qedf_io_work_cache);
4084
4085        qed_ops = qed_get_fcoe_ops();
4086        if (!qed_ops) {
4087                QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
4088                goto err1;
4089        }
4090
4091#ifdef CONFIG_DEBUG_FS
4092        qedf_dbg_init("qedf");
4093#endif
4094
4095        qedf_fc_transport_template =
4096            fc_attach_transport(&qedf_fc_transport_fn);
4097        if (!qedf_fc_transport_template) {
4098                QEDF_ERR(NULL, "Could not register with FC transport\n");
4099                goto err2;
4100        }
4101
4102        qedf_fc_vport_transport_template =
4103                fc_attach_transport(&qedf_fc_vport_transport_fn);
4104        if (!qedf_fc_vport_transport_template) {
4105                QEDF_ERR(NULL, "Could not register vport template with FC "
4106                          "transport\n");
4107                goto err3;
4108        }
4109
4110        qedf_io_wq = create_workqueue("qedf_io_wq");
4111        if (!qedf_io_wq) {
4112                QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
4113                goto err4;
4114        }
4115
4116        qedf_cb_ops.get_login_failures = qedf_get_login_failures;
4117
4118        ret = pci_register_driver(&qedf_pci_driver);
4119        if (ret) {
4120                QEDF_ERR(NULL, "Failed to register driver\n");
4121                goto err5;
4122        }
4123
4124        return 0;
4125
4126err5:
4127        destroy_workqueue(qedf_io_wq);
4128err4:
4129        fc_release_transport(qedf_fc_vport_transport_template);
4130err3:
4131        fc_release_transport(qedf_fc_transport_template);
4132err2:
4133#ifdef CONFIG_DEBUG_FS
4134        qedf_dbg_exit();
4135#endif
4136        qed_put_fcoe_ops();
4137err1:
4138        return -EINVAL;
4139}
4140
4141static void __exit qedf_cleanup(void)
4142{
4143        pci_unregister_driver(&qedf_pci_driver);
4144
4145        destroy_workqueue(qedf_io_wq);
4146
4147        fc_release_transport(qedf_fc_vport_transport_template);
4148        fc_release_transport(qedf_fc_transport_template);
4149#ifdef CONFIG_DEBUG_FS
4150        qedf_dbg_exit();
4151#endif
4152        qed_put_fcoe_ops();
4153
4154        kmem_cache_destroy(qedf_io_work_cache);
4155}
4156
4157MODULE_LICENSE("GPL");
4158MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
4159MODULE_AUTHOR("QLogic Corporation");
4160MODULE_VERSION(QEDF_VERSION);
4161module_init(qedf_init);
4162module_exit(qedf_cleanup);
4163