/* linux/drivers/scsi/fcoe/fcoe.c */
   1/*
   2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms and conditions of the GNU General Public License,
   6 * version 2, as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program; if not, write to the Free Software Foundation, Inc.,
  15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  16 *
  17 * Maintained at www.Open-FCoE.org
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/version.h>
  22#include <linux/spinlock.h>
  23#include <linux/netdevice.h>
  24#include <linux/etherdevice.h>
  25#include <linux/ethtool.h>
  26#include <linux/if_ether.h>
  27#include <linux/if_vlan.h>
  28#include <linux/crc32.h>
  29#include <linux/cpu.h>
  30#include <linux/fs.h>
  31#include <linux/sysfs.h>
  32#include <linux/ctype.h>
  33#include <scsi/scsi_tcq.h>
  34#include <scsi/scsicam.h>
  35#include <scsi/scsi_transport.h>
  36#include <scsi/scsi_transport_fc.h>
  37#include <net/rtnetlink.h>
  38
  39#include <scsi/fc/fc_encaps.h>
  40#include <scsi/fc/fc_fip.h>
  41
  42#include <scsi/libfc.h>
  43#include <scsi/fc_frame.h>
  44#include <scsi/libfcoe.h>
  45
  46#include "fcoe.h"
  47
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");

/* Performance tuning parameters for fcoe */
static unsigned int fcoe_ddp_min;
module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
		 "Direct Data Placement (DDP).");

/* Serializes the module's create/destroy configuration paths */
DEFINE_MUTEX(fcoe_config_mutex);

/* fcoe_percpu_clean completion.  Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);

/* fcoe host list */
/* must only by accessed under the RTNL mutex */
LIST_HEAD(fcoe_hostlist);
/* Per-CPU receive-thread state: skb queue, task pointer, CRC/EOF page cache */
DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);

/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *shost);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *arg);
static void fcoe_clean_pending_queue(struct fc_lport *lp);
static void fcoe_percpu_clean(struct fc_lport *lp);
static int fcoe_link_ok(struct fc_lport *lp);

static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);

static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
static struct fcoe_interface *
	fcoe_hostlist_lookup_port(const struct net_device *dev);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};

/* FC transport template attached in fcoe_if_init(), released in fcoe_if_exit() */
static struct scsi_transport_template *scsi_transport_fcoe_sw;
  94
/*
 * FC transport attribute/callback table for the software FCoE hosts.
 * The "show_*" flags expose read-only sysfs attributes; the function
 * pointers delegate to the generic libfc/fc_transport implementations.
 */
struct fc_function_template fcoe_transport_function = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	/* per-rport private area used by libfc */
	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	/* "issue LIP" from sysfs maps to a local fabric reset */
	.issue_fc_host_lip = fcoe_reset,

	.terminate_rport_io = fc_rport_terminate_io,
};
 127
/*
 * SCSI host template for software FCoE hosts; all command handling and
 * error recovery is delegated to the shared libfc fc_* entry points.
 */
static struct scsi_host_template fcoe_shost_template = {
	.module = THIS_MODULE,
	.name = "FCoE Driver",
	.proc_name = FCOE_NAME,
	.queuecommand = fc_queuecommand,
	.eh_abort_handler = fc_eh_abort,
	.eh_device_reset_handler = fc_eh_device_reset,
	.eh_host_reset_handler = fc_eh_host_reset,
	.slave_alloc = fc_slave_alloc,
	.change_queue_depth = fc_change_queue_depth,
	.change_queue_type = fc_change_queue_type,
	.this_id = -1,
	.cmd_per_lun = 32,
	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
};
 146
 147static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
 148                         struct packet_type *ptype,
 149                         struct net_device *orig_dev);
 150/**
 151 * fcoe_interface_setup()
 152 * @fcoe: new fcoe_interface
 153 * @netdev : ptr to the associated netdevice struct
 154 *
 155 * Returns : 0 for success
 156 * Locking: must be called with the RTNL mutex held
 157 */
 158static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 159                                struct net_device *netdev)
 160{
 161        struct fcoe_ctlr *fip = &fcoe->ctlr;
 162        struct netdev_hw_addr *ha;
 163        u8 flogi_maddr[ETH_ALEN];
 164
 165        fcoe->netdev = netdev;
 166
 167        /* Do not support for bonding device */
 168        if ((netdev->priv_flags & IFF_MASTER_ALB) ||
 169            (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
 170            (netdev->priv_flags & IFF_MASTER_8023AD)) {
 171                return -EOPNOTSUPP;
 172        }
 173
 174        /* look for SAN MAC address, if multiple SAN MACs exist, only
 175         * use the first one for SPMA */
 176        rcu_read_lock();
 177        for_each_dev_addr(netdev, ha) {
 178                if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
 179                    (is_valid_ether_addr(fip->ctl_src_addr))) {
 180                        memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
 181                        fip->spma = 1;
 182                        break;
 183                }
 184        }
 185        rcu_read_unlock();
 186
 187        /* setup Source Mac Address */
 188        if (!fip->spma)
 189                memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
 190
 191        /*
 192         * Add FCoE MAC address as second unicast MAC address
 193         * or enter promiscuous mode if not capable of listening
 194         * for multiple unicast MACs.
 195         */
 196        memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
 197        dev_unicast_add(netdev, flogi_maddr);
 198        if (fip->spma)
 199                dev_unicast_add(netdev, fip->ctl_src_addr);
 200        dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
 201
 202        /*
 203         * setup the receive function from ethernet driver
 204         * on the ethertype for the given device
 205         */
 206        fcoe->fcoe_packet_type.func = fcoe_rcv;
 207        fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
 208        fcoe->fcoe_packet_type.dev = netdev;
 209        dev_add_pack(&fcoe->fcoe_packet_type);
 210
 211        fcoe->fip_packet_type.func = fcoe_fip_recv;
 212        fcoe->fip_packet_type.type = htons(ETH_P_FIP);
 213        fcoe->fip_packet_type.dev = netdev;
 214        dev_add_pack(&fcoe->fip_packet_type);
 215
 216        return 0;
 217}
 218
 219static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
 220static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
 221static void fcoe_destroy_work(struct work_struct *work);
 222
 223/**
 224 * fcoe_interface_create()
 225 * @netdev: network interface
 226 *
 227 * Returns: pointer to a struct fcoe_interface or NULL on error
 228 */
 229static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
 230{
 231        struct fcoe_interface *fcoe;
 232
 233        fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
 234        if (!fcoe) {
 235                FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
 236                return NULL;
 237        }
 238
 239        dev_hold(netdev);
 240        kref_init(&fcoe->kref);
 241
 242        /*
 243         * Initialize FIP.
 244         */
 245        fcoe_ctlr_init(&fcoe->ctlr);
 246        fcoe->ctlr.send = fcoe_fip_send;
 247        fcoe->ctlr.update_mac = fcoe_update_src_mac;
 248
 249        fcoe_interface_setup(fcoe, netdev);
 250
 251        return fcoe;
 252}
 253
/**
 * fcoe_interface_cleanup() - clean up netdev configurations
 * @fcoe: interface whose packet handlers and MAC filters are removed
 *
 * Undoes fcoe_interface_setup(): detaches the FCoE/FIP ethertype
 * handlers and deletes the unicast/multicast filter entries that were
 * programmed on the netdev.
 *
 * Caller must be holding the RTNL mutex
 */
void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
	struct net_device *netdev = fcoe->netdev;
	struct fcoe_ctlr *fip = &fcoe->ctlr;
	u8 flogi_maddr[ETH_ALEN];

	/*
	 * Don't listen for Ethernet packets anymore.
	 * synchronize_net() ensures that the packet handlers are not running
	 * on another CPU. dev_remove_pack() would do that, this calls the
	 * unsyncronized version __dev_remove_pack() to avoid multiple delays.
	 */
	__dev_remove_pack(&fcoe->fcoe_packet_type);
	__dev_remove_pack(&fcoe->fip_packet_type);
	/* single grace period covers both handler removals above */
	synchronize_net();

	/* Delete secondary MAC addresses */
	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
	dev_unicast_delete(netdev, flogi_maddr);
	if (!is_zero_ether_addr(fip->data_src_addr))
		dev_unicast_delete(netdev, fip->data_src_addr);
	if (fip->spma)
		dev_unicast_delete(netdev, fip->ctl_src_addr);
	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
}
 285
 286/**
 287 * fcoe_interface_release() - fcoe_port kref release function
 288 * @kref: embedded reference count in an fcoe_interface struct
 289 */
 290static void fcoe_interface_release(struct kref *kref)
 291{
 292        struct fcoe_interface *fcoe;
 293        struct net_device *netdev;
 294
 295        fcoe = container_of(kref, struct fcoe_interface, kref);
 296        netdev = fcoe->netdev;
 297        /* tear-down the FCoE controller */
 298        fcoe_ctlr_destroy(&fcoe->ctlr);
 299        kfree(fcoe);
 300        dev_put(netdev);
 301}
 302
 303/**
 304 * fcoe_interface_get()
 305 * @fcoe:
 306 */
 307static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
 308{
 309        kref_get(&fcoe->kref);
 310}
 311
 312/**
 313 * fcoe_interface_put()
 314 * @fcoe:
 315 */
 316static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
 317{
 318        kref_put(&fcoe->kref, fcoe_interface_release);
 319}
 320
 321/**
 322 * fcoe_fip_recv - handle a received FIP frame.
 323 * @skb: the receive skb
 324 * @dev: associated &net_device
 325 * @ptype: the &packet_type structure which was used to register this handler.
 326 * @orig_dev: original receive &net_device, in case @dev is a bond.
 327 *
 328 * Returns: 0 for success
 329 */
 330static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
 331                         struct packet_type *ptype,
 332                         struct net_device *orig_dev)
 333{
 334        struct fcoe_interface *fcoe;
 335
 336        fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
 337        fcoe_ctlr_recv(&fcoe->ctlr, skb);
 338        return 0;
 339}
 340
 341/**
 342 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
 343 * @fip: FCoE controller.
 344 * @skb: FIP Packet.
 345 */
 346static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 347{
 348        skb->dev = fcoe_from_ctlr(fip)->netdev;
 349        dev_queue_xmit(skb);
 350}
 351
 352/**
 353 * fcoe_update_src_mac() - Update Ethernet MAC filters.
 354 * @fip: FCoE controller.
 355 * @old: Unicast MAC address to delete if the MAC is non-zero.
 356 * @new: Unicast MAC address to add.
 357 *
 358 * Remove any previously-set unicast MAC filter.
 359 * Add secondary FCoE MAC address filter for our OUI.
 360 */
 361static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
 362{
 363        struct fcoe_interface *fcoe;
 364
 365        fcoe = fcoe_from_ctlr(fip);
 366        rtnl_lock();
 367        if (!is_zero_ether_addr(old))
 368                dev_unicast_delete(fcoe->netdev, old);
 369        dev_unicast_add(fcoe->netdev, new);
 370        rtnl_unlock();
 371}
 372
 373/**
 374 * fcoe_lport_config() - sets up the fc_lport
 375 * @lp: ptr to the fc_lport
 376 *
 377 * Returns: 0 for success
 378 */
 379static int fcoe_lport_config(struct fc_lport *lp)
 380{
 381        lp->link_up = 0;
 382        lp->qfull = 0;
 383        lp->max_retry_count = 3;
 384        lp->max_rport_retry_count = 3;
 385        lp->e_d_tov = 2 * 1000; /* FC-FS default */
 386        lp->r_a_tov = 2 * 2 * 1000;
 387        lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
 388                              FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
 389
 390        fc_lport_init_stats(lp);
 391
 392        /* lport fc_lport related configuration */
 393        fc_lport_config(lp);
 394
 395        /* offload related configuration */
 396        lp->crc_offload = 0;
 397        lp->seq_offload = 0;
 398        lp->lro_enabled = 0;
 399        lp->lro_xid = 0;
 400        lp->lso_max = 0;
 401
 402        return 0;
 403}
 404
 405/**
 406 * fcoe_queue_timer() - fcoe queue timer
 407 * @lp: the fc_lport pointer
 408 *
 409 * Calls fcoe_check_wait_queue on timeout
 410 *
 411 */
 412static void fcoe_queue_timer(ulong lp)
 413{
 414        fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
 415}
 416
/**
 * fcoe_netdev_config() - Set up netdev for SW FCoE
 * @lp : ptr to the fc_lport
 * @netdev : ptr to the associated netdevice struct
 *
 * Derives the max frame size from the device MTU, mirrors the netdev's
 * offload capabilities (SG, FC-CRC, LSO, DDP) into the lport, sets up
 * the transmit backlog queue and its retry timer, and derives the
 * WWNN/WWPN from the device MAC address.
 *
 * Must be called after fcoe_lport_config() as it will use lport mutex
 *
 * Returns : 0 for success
 */
static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
{
	u32 mfs;
	u64 wwnn, wwpn;
	struct fcoe_interface *fcoe;
	struct fcoe_port *port;

	/* Setup lport private data to point to fcoe softc */
	port = lport_priv(lp);
	fcoe = port->fcoe;

	/*
	 * Determine max frame size based on underlying device and optional
	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
	 * will return 0, so do this first.
	 */
	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
			     sizeof(struct fcoe_crc_eof));
	if (fc_set_mfs(lp, mfs))
		return -EINVAL;

	/* offload features support */
	if (netdev->features & NETIF_F_SG)
		lp->sg_supp = 1;

	if (netdev->features & NETIF_F_FCOE_CRC) {
		lp->crc_offload = 1;
		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
	}
	if (netdev->features & NETIF_F_FSO) {
		/* large send offload: cap sequences at the device gso limit */
		lp->seq_offload = 1;
		lp->lso_max = netdev->gso_max_size;
		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
				lp->lso_max);
	}
	if (netdev->fcoe_ddp_xid) {
		/* device supports DDP up to this exchange id */
		lp->lro_enabled = 1;
		lp->lro_xid = netdev->fcoe_ddp_xid;
		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
				lp->lro_xid);
	}
	/* backlog queue drained by fcoe_check_wait_queue() via the timer */
	skb_queue_head_init(&port->fcoe_pending_queue);
	port->fcoe_pending_queue_active = 0;
	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);

	/* WWNN/WWPN are synthesized from the MAC address */
	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
	fc_set_wwnn(lp, wwnn);
	/* XXX - 3rd arg needs to be vlan id */
	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
	fc_set_wwpn(lp, wwpn);

	return 0;
}
 479
 480/**
 481 * fcoe_shost_config() - Sets up fc_lport->host
 482 * @lp : ptr to the fc_lport
 483 * @shost : ptr to the associated scsi host
 484 * @dev : device associated to scsi host
 485 *
 486 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
 487 *
 488 * Returns : 0 for success
 489 */
 490static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
 491                                struct device *dev)
 492{
 493        int rc = 0;
 494
 495        /* lport scsi host config */
 496        lp->host = shost;
 497
 498        lp->host->max_lun = FCOE_MAX_LUN;
 499        lp->host->max_id = FCOE_MAX_FCP_TARGET;
 500        lp->host->max_channel = 0;
 501        lp->host->transportt = scsi_transport_fcoe_sw;
 502
 503        /* add the new host to the SCSI-ml */
 504        rc = scsi_add_host(lp->host, dev);
 505        if (rc) {
 506                FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
 507                                "error on scsi_add_host\n");
 508                return rc;
 509        }
 510        sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
 511                FCOE_NAME, FCOE_VERSION,
 512                fcoe_netdev(lp)->name);
 513
 514        return 0;
 515}
 516
 517/*
 518 * fcoe_oem_match() - match for read types IO
 519 * @fp: the fc_frame for new IO.
 520 *
 521 * Returns : true for read types IO, otherwise returns false.
 522 */
 523bool fcoe_oem_match(struct fc_frame *fp)
 524{
 525        return fc_fcp_is_read(fr_fsp(fp)) &&
 526                (fr_fsp(fp)->data_len > fcoe_ddp_min);
 527}
 528
/**
 * fcoe_em_config() - allocates em for this lport
 * @lp: the fcoe that em is to allocated for
 *
 * Sets up exchange managers: when DDP offload is available, a dedicated
 * offload EM covering xids [FCOE_MIN_XID, lro_xid] is shared among all
 * lports on the same physical netdev; a regular EM covers the remaining
 * xid range.
 *
 * Returns : 0 on success
 */
static inline int fcoe_em_config(struct fc_lport *lp)
{
	struct fcoe_port *port = lport_priv(lp);
	struct fcoe_interface *fcoe = port->fcoe;
	struct fcoe_interface *oldfcoe = NULL;
	struct net_device *old_real_dev, *cur_real_dev;
	u16 min_xid = FCOE_MIN_XID;
	u16 max_xid = FCOE_MAX_XID;

	/*
	 * Check if need to allocate an em instance for
	 * offload exchange ids to be shared across all VN_PORTs/lport.
	 */
	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
		lp->lro_xid = 0;
		goto skip_oem;
	}

	/*
	 * Reuse existing offload em instance in case
	 * it is already allocated on real eth device
	 */
	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
	else
		cur_real_dev = fcoe->netdev;

	/* walk the host list comparing underlying (non-VLAN) devices */
	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
		else
			old_real_dev = oldfcoe->netdev;

		if (cur_real_dev == old_real_dev) {
			fcoe->oem = oldfcoe->oem;
			break;
		}
	}

	if (fcoe->oem) {
		/* share the existing offload EM with this lport */
		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
			printk(KERN_ERR "fcoe_em_config: failed to add "
			       "offload em:%p on interface:%s\n",
			       fcoe->oem, fcoe->netdev->name);
			return -ENOMEM;
		}
	} else {
		/* first lport on this physical device: allocate offload EM */
		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
					    FCOE_MIN_XID, lp->lro_xid,
					    fcoe_oem_match);
		if (!fcoe->oem) {
			printk(KERN_ERR "fcoe_em_config: failed to allocate "
			       "em for offload exches on interface:%s\n",
			       fcoe->netdev->name);
			return -ENOMEM;
		}
	}

	/*
	 * Exclude offload EM xid range from next EM xid range.
	 */
	min_xid += lp->lro_xid + 1;

skip_oem:
	/* regular (non-offload) exchange manager for the remaining xids */
	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
		printk(KERN_ERR "fcoe_em_config: failed to "
		       "allocate em on interface %s\n", fcoe->netdev->name);
		return -ENOMEM;
	}

	return 0;
}
 607
/**
 * fcoe_if_destroy() - FCoE software HBA tear-down function
 * @lport: fc_lport to destroy
 *
 * Reverses fcoe_if_create(): fabric logout, libfc teardown, timer and
 * queue cleanup, interface reference drop, then SCSI host removal and
 * final host put. The ordering below is deliberate; do not reorder.
 */
static void fcoe_if_destroy(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct fcoe_interface *fcoe = port->fcoe;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");

	/* Logout of the fabric */
	fc_fabric_logoff(lport);

	/* Cleanup the fc_lport */
	fc_lport_destroy(lport);
	fc_fcp_destroy(lport);

	/* Stop the transmit retry timer */
	del_timer_sync(&port->timer);

	/* Free existing transmit skbs */
	fcoe_clean_pending_queue(lport);

	/* receives may not be stopped until after this */
	fcoe_interface_put(fcoe);

	/* Free queued packets for the per-CPU receive threads */
	fcoe_percpu_clean(lport);

	/* Detach from the scsi-ml */
	fc_remove_host(lport->host);
	scsi_remove_host(lport->host);

	/* There are no more rports or I/O, free the EM */
	fc_exch_mgr_free(lport);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(lport);

	/* Release the Scsi_Host */
	scsi_host_put(lport->host);
}
 652
 653/*
 654 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
 655 * @lp: the corresponding fc_lport
 656 * @xid: the exchange id for this ddp transfer
 657 * @sgl: the scatterlist describing this transfer
 658 * @sgc: number of sg items
 659 *
 660 * Returns : 0 no ddp
 661 */
 662static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
 663                             struct scatterlist *sgl, unsigned int sgc)
 664{
 665        struct net_device *n = fcoe_netdev(lp);
 666
 667        if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
 668                return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
 669
 670        return 0;
 671}
 672
 673/*
 674 * fcoe_ddp_done - calls LLD's ddp_done through net_device
 675 * @lp: the corresponding fc_lport
 676 * @xid: the exchange id for this ddp transfer
 677 *
 678 * Returns : the length of data that have been completed by ddp
 679 */
 680static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
 681{
 682        struct net_device *n = fcoe_netdev(lp);
 683
 684        if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
 685                return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
 686        return 0;
 687}
 688
/* libfc callbacks implemented by this transport: frame tx and DDP hooks */
static struct libfc_function_template fcoe_libfc_fcn_templ = {
	.frame_send = fcoe_xmit,
	.ddp_setup = fcoe_ddp_setup,
	.ddp_done = fcoe_ddp_done,
};
 694
/**
 * fcoe_if_create() - this function creates the fcoe port
 * @fcoe: fcoe_interface structure to create an fc_lport instance on
 * @parent: device pointer to be the parent in sysfs for the SCSI host
 *
 * Creates fc_lport struct and scsi_host for lport, configures lport.
 * On failure, resources acquired so far are unwound via the goto
 * cleanup labels at the bottom.
 *
 * Returns : The allocated fc_lport or an error pointer
 */
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
				       struct device *parent)
{
	int rc;
	struct fc_lport *lport = NULL;
	struct fcoe_port *port;
	struct Scsi_Host *shost;
	struct net_device *netdev = fcoe->netdev;

	FCOE_NETDEV_DBG(netdev, "Create Interface\n");

	/* fcoe_port lives in the lport private area of the host */
	shost = libfc_host_alloc(&fcoe_shost_template,
				 sizeof(struct fcoe_port));
	if (!shost) {
		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
		rc = -ENOMEM;
		goto out;
	}
	lport = shost_priv(shost);
	port = lport_priv(lport);
	port->lport = lport;
	port->fcoe = fcoe;
	INIT_WORK(&port->destroy_work, fcoe_destroy_work);

	/* configure fc_lport, e.g., em */
	rc = fcoe_lport_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
				"interface\n");
		goto out_host_put;
	}

	/* configure lport network properties */
	rc = fcoe_netdev_config(lport, netdev);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* configure lport scsi host properties */
	rc = fcoe_shost_config(lport, shost, parent);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* Initialize the library */
	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/*
	 * fcoe_em_alloc() and fcoe_hostlist_add() both
	 * need to be atomic with respect to other changes to the hostlist
	 * since fcoe_em_alloc() looks for an existing EM
	 * instance on host list updated by fcoe_hostlist_add().
	 *
	 * This is currently handled through the fcoe_config_mutex begin held.
	 */

	/* lport exch manager allocation */
	rc = fcoe_em_config(lport);
	if (rc) {
		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
				"interface\n");
		goto out_lp_destroy;
	}

	/* success: the lport holds a reference on the interface */
	fcoe_interface_get(fcoe);
	return lport;

out_lp_destroy:
	fc_exch_mgr_free(lport);
out_host_put:
	scsi_host_put(lport->host);
out:
	return ERR_PTR(rc);
}
 787
 788/**
 789 * fcoe_if_init() - attach to scsi transport
 790 *
 791 * Returns : 0 on success
 792 */
 793static int __init fcoe_if_init(void)
 794{
 795        /* attach to scsi transport */
 796        scsi_transport_fcoe_sw =
 797                fc_attach_transport(&fcoe_transport_function);
 798
 799        if (!scsi_transport_fcoe_sw) {
 800                printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
 801                return -ENODEV;
 802        }
 803
 804        return 0;
 805}
 806
 807/**
 808 * fcoe_if_exit() - detach from scsi transport
 809 *
 810 * Returns : 0 on success
 811 */
 812int __exit fcoe_if_exit(void)
 813{
 814        fc_release_transport(scsi_transport_fcoe_sw);
 815        scsi_transport_fcoe_sw = NULL;
 816        return 0;
 817}
 818
 819/**
 820 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
 821 * @cpu: cpu index for the online cpu
 822 */
 823static void fcoe_percpu_thread_create(unsigned int cpu)
 824{
 825        struct fcoe_percpu_s *p;
 826        struct task_struct *thread;
 827
 828        p = &per_cpu(fcoe_percpu, cpu);
 829
 830        thread = kthread_create(fcoe_percpu_receive_thread,
 831                                (void *)p, "fcoethread/%d", cpu);
 832
 833        if (likely(!IS_ERR(thread))) {
 834                kthread_bind(thread, cpu);
 835                wake_up_process(thread);
 836
 837                spin_lock_bh(&p->fcoe_rx_list.lock);
 838                p->thread = thread;
 839                spin_unlock_bh(&p->fcoe_rx_list.lock);
 840        }
 841}
 842
/**
 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
 * @cpu: cpu index the rx thread is to be removed
 *
 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
 * current CPU's Rx thread. If the thread being destroyed is bound to
 * the CPU processing this context the skbs will be freed.
 */
static void fcoe_percpu_thread_destroy(unsigned int cpu)
{
	struct fcoe_percpu_s *p;
	struct task_struct *thread;
	struct page *crc_eof;
	struct sk_buff *skb;
#ifdef CONFIG_SMP
	struct fcoe_percpu_s *p0;
	unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */

	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);

	/* Prevent any new skbs from being queued for this CPU. */
	p = &per_cpu(fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
	/* detach thread and CRC/EOF page under the lock; freed below */
	thread = p->thread;
	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);

#ifdef CONFIG_SMP
	/*
	 * Don't bother moving the skb's if this context is running
	 * on the same CPU that is having its thread destroyed. This
	 * can easily happen when the module is removed.
	 */
	if (cpu != targ_cpu) {
		p0 = &per_cpu(fcoe_percpu, targ_cpu);
		spin_lock_bh(&p0->fcoe_rx_list.lock);
		if (p0->thread) {
			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
				 cpu, targ_cpu);

			/* migrate pending skbs to the surviving CPU's queue */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				__skb_queue_tail(&p0->fcoe_rx_list, skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		} else {
			/*
			 * The targeted CPU is not initialized and cannot accept
			 * new  skbs. Unlock the targeted CPU and drop the skbs
			 * on the CPU that is going offline.
			 */
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p0->fcoe_rx_list.lock);
		}
	} else {
		/*
		 * This scenario occurs when the module is being removed
		 * and all threads are being destroyed. skbs will continue
		 * to be shifted from the CPU thread that is being removed
		 * to the CPU thread associated with the CPU that is processing
		 * the module removal. Once there is only one CPU Rx thread it
		 * will reach this case and we will drop all skbs and later
		 * stop the thread.
		 */
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
			kfree_skb(skb);
		spin_unlock_bh(&p->fcoe_rx_list.lock);
	}
#else
	/*
	 * This a non-SMP scenario where the singular Rx thread is
	 * being removed. Free all skbs and stop the thread.
	 */
	spin_lock_bh(&p->fcoe_rx_list.lock);
	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
		kfree_skb(skb);
	spin_unlock_bh(&p->fcoe_rx_list.lock);
#endif

	/* stop the detached thread (safe: its queue is drained or migrated) */
	if (thread)
		kthread_stop(thread);

	/* release the cached CRC/EOF page taken out of the per-CPU state */
	if (crc_eof)
		put_page(crc_eof);
}
 932
 933/**
 934 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 935 * @nfb: callback data block
 936 * @action: event triggering the callback
 937 * @hcpu: index for the cpu of this event
 938 *
 939 * This creates or destroys per cpu data for fcoe
 940 *
 941 * Returns NOTIFY_OK always.
 942 */
 943static int fcoe_cpu_callback(struct notifier_block *nfb,
 944                             unsigned long action, void *hcpu)
 945{
 946        unsigned cpu = (unsigned long)hcpu;
 947
 948        switch (action) {
 949        case CPU_ONLINE:
 950        case CPU_ONLINE_FROZEN:
 951                FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
 952                fcoe_percpu_thread_create(cpu);
 953                break;
 954        case CPU_DEAD:
 955        case CPU_DEAD_FROZEN:
 956                FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
 957                fcoe_percpu_thread_destroy(cpu);
 958                break;
 959        default:
 960                break;
 961        }
 962        return NOTIFY_OK;
 963}
 964
/* Hotplug notifier: keeps the per-CPU Rx threads in sync with online CPUs */
static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
};
 968
 969/**
 970 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 971 * @skb: the receive skb
 972 * @dev: associated net device
 973 * @ptype: context
 974 * @olddev: last device
 975 *
 976 * this function will receive the packet and build fc frame and pass it up
 977 *
 978 * Returns: 0 for success
 979 */
 980int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
 981             struct packet_type *ptype, struct net_device *olddev)
 982{
 983        struct fc_lport *lp;
 984        struct fcoe_rcv_info *fr;
 985        struct fcoe_interface *fcoe;
 986        struct fc_frame_header *fh;
 987        struct fcoe_percpu_s *fps;
 988        unsigned int cpu;
 989
 990        fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
 991        lp = fcoe->ctlr.lp;
 992        if (unlikely(lp == NULL)) {
 993                FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
 994                goto err2;
 995        }
 996        if (!lp->link_up)
 997                goto err2;
 998
 999        FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
1000                        "data:%p tail:%p end:%p sum:%d dev:%s",
1001                        skb->len, skb->data_len, skb->head, skb->data,
1002                        skb_tail_pointer(skb), skb_end_pointer(skb),
1003                        skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1004
1005        /* check for FCOE packet type */
1006        if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
1007                FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
1008                goto err;
1009        }
1010
1011        /*
1012         * Check for minimum frame length, and make sure required FCoE
1013         * and FC headers are pulled into the linear data area.
1014         */
1015        if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1016            !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1017                goto err;
1018
1019        skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1020        fh = (struct fc_frame_header *) skb_transport_header(skb);
1021
1022        fr = fcoe_dev_from_skb(skb);
1023        fr->fr_dev = lp;
1024        fr->ptype = ptype;
1025
1026        /*
1027         * In case the incoming frame's exchange is originated from
1028         * the initiator, then received frame's exchange id is ANDed
1029         * with fc_cpu_mask bits to get the same cpu on which exchange
1030         * was originated, otherwise just use the current cpu.
1031         */
1032        if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1033                cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1034        else
1035                cpu = smp_processor_id();
1036
1037        fps = &per_cpu(fcoe_percpu, cpu);
1038        spin_lock_bh(&fps->fcoe_rx_list.lock);
1039        if (unlikely(!fps->thread)) {
1040                /*
1041                 * The targeted CPU is not ready, let's target
1042                 * the first CPU now. For non-SMP systems this
1043                 * will check the same CPU twice.
1044                 */
1045                FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
1046                                "ready for incoming skb- using first online "
1047                                "CPU.\n");
1048
1049                spin_unlock_bh(&fps->fcoe_rx_list.lock);
1050                cpu = first_cpu(cpu_online_map);
1051                fps = &per_cpu(fcoe_percpu, cpu);
1052                spin_lock_bh(&fps->fcoe_rx_list.lock);
1053                if (!fps->thread) {
1054                        spin_unlock_bh(&fps->fcoe_rx_list.lock);
1055                        goto err;
1056                }
1057        }
1058
1059        /*
1060         * We now have a valid CPU that we're targeting for
1061         * this skb. We also have this receive thread locked,
1062         * so we're free to queue skbs into it's queue.
1063         */
1064        __skb_queue_tail(&fps->fcoe_rx_list, skb);
1065        if (fps->fcoe_rx_list.qlen == 1)
1066                wake_up_process(fps->thread);
1067
1068        spin_unlock_bh(&fps->fcoe_rx_list.lock);
1069
1070        return 0;
1071err:
1072        fc_lport_get_stats(lp)->ErrorFrames++;
1073
1074err2:
1075        kfree_skb(skb);
1076        return -1;
1077}
1078
1079/**
1080 * fcoe_start_io() - pass to netdev to start xmit for fcoe
1081 * @skb: the skb to be xmitted
1082 *
1083 * Returns: 0 for success
1084 */
1085static inline int fcoe_start_io(struct sk_buff *skb)
1086{
1087        int rc;
1088
1089        skb_get(skb);
1090        rc = dev_queue_xmit(skb);
1091        if (rc != 0)
1092                return rc;
1093        kfree_skb(skb);
1094        return 0;
1095}
1096
1097/**
1098 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
1099 * @skb: the skb to be xmitted
1100 * @tlen: total len
1101 *
1102 * Returns: 0 for success
1103 */
1104static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1105{
1106        struct fcoe_percpu_s *fps;
1107        struct page *page;
1108
1109        fps = &get_cpu_var(fcoe_percpu);
1110        page = fps->crc_eof_page;
1111        if (!page) {
1112                page = alloc_page(GFP_ATOMIC);
1113                if (!page) {
1114                        put_cpu_var(fcoe_percpu);
1115                        return -ENOMEM;
1116                }
1117                fps->crc_eof_page = page;
1118                fps->crc_eof_offset = 0;
1119        }
1120
1121        get_page(page);
1122        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
1123                           fps->crc_eof_offset, tlen);
1124        skb->len += tlen;
1125        skb->data_len += tlen;
1126        skb->truesize += tlen;
1127        fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
1128
1129        if (fps->crc_eof_offset >= PAGE_SIZE) {
1130                fps->crc_eof_page = NULL;
1131                fps->crc_eof_offset = 0;
1132                put_page(page);
1133        }
1134        put_cpu_var(fcoe_percpu);
1135        return 0;
1136}
1137
1138/**
1139 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
1140 * @fp: the fc_frame containing data to be checksummed
1141 *
1142 * This uses crc32() to calculate the crc for port frame
1143 * Return   : 32 bit crc
1144 */
1145u32 fcoe_fc_crc(struct fc_frame *fp)
1146{
1147        struct sk_buff *skb = fp_skb(fp);
1148        struct skb_frag_struct *frag;
1149        unsigned char *data;
1150        unsigned long off, len, clen;
1151        u32 crc;
1152        unsigned i;
1153
1154        crc = crc32(~0, skb->data, skb_headlen(skb));
1155
1156        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1157                frag = &skb_shinfo(skb)->frags[i];
1158                off = frag->page_offset;
1159                len = frag->size;
1160                while (len > 0) {
1161                        clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1162                        data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1163                                           KM_SKB_DATA_SOFTIRQ);
1164                        crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1165                        kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1166                        off += clen;
1167                        len -= clen;
1168                }
1169        }
1170        return crc;
1171}
1172
1173/**
1174 * fcoe_xmit() - FCoE frame transmit function
1175 * @lp: the associated local fcoe
1176 * @fp: the fc_frame to be transmitted
1177 *
1178 * Return   : 0 for success
1179 */
1180int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1181{
1182        int wlen;
1183        u32 crc;
1184        struct ethhdr *eh;
1185        struct fcoe_crc_eof *cp;
1186        struct sk_buff *skb;
1187        struct fcoe_dev_stats *stats;
1188        struct fc_frame_header *fh;
1189        unsigned int hlen;              /* header length implies the version */
1190        unsigned int tlen;              /* trailer length */
1191        unsigned int elen;              /* eth header, may include vlan */
1192        struct fcoe_port *port = lport_priv(lp);
1193        struct fcoe_interface *fcoe = port->fcoe;
1194        u8 sof, eof;
1195        struct fcoe_hdr *hp;
1196
1197        WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1198
1199        fh = fc_frame_header_get(fp);
1200        skb = fp_skb(fp);
1201        wlen = skb->len / FCOE_WORD_TO_BYTE;
1202
1203        if (!lp->link_up) {
1204                kfree_skb(skb);
1205                return 0;
1206        }
1207
1208        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1209            fcoe_ctlr_els_send(&fcoe->ctlr, skb))
1210                return 0;
1211
1212        sof = fr_sof(fp);
1213        eof = fr_eof(fp);
1214
1215        elen = sizeof(struct ethhdr);
1216        hlen = sizeof(struct fcoe_hdr);
1217        tlen = sizeof(struct fcoe_crc_eof);
1218        wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1219
1220        /* crc offload */
1221        if (likely(lp->crc_offload)) {
1222                skb->ip_summed = CHECKSUM_PARTIAL;
1223                skb->csum_start = skb_headroom(skb);
1224                skb->csum_offset = skb->len;
1225                crc = 0;
1226        } else {
1227                skb->ip_summed = CHECKSUM_NONE;
1228                crc = fcoe_fc_crc(fp);
1229        }
1230
1231        /* copy port crc and eof to the skb buff */
1232        if (skb_is_nonlinear(skb)) {
1233                skb_frag_t *frag;
1234                if (fcoe_get_paged_crc_eof(skb, tlen)) {
1235                        kfree_skb(skb);
1236                        return -ENOMEM;
1237                }
1238                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1239                cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1240                        + frag->page_offset;
1241        } else {
1242                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1243        }
1244
1245        memset(cp, 0, sizeof(*cp));
1246        cp->fcoe_eof = eof;
1247        cp->fcoe_crc32 = cpu_to_le32(~crc);
1248
1249        if (skb_is_nonlinear(skb)) {
1250                kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1251                cp = NULL;
1252        }
1253
1254        /* adjust skb network/transport offsets to match mac/fcoe/port */
1255        skb_push(skb, elen + hlen);
1256        skb_reset_mac_header(skb);
1257        skb_reset_network_header(skb);
1258        skb->mac_len = elen;
1259        skb->protocol = htons(ETH_P_FCOE);
1260        skb->dev = fcoe->netdev;
1261
1262        /* fill up mac and fcoe headers */
1263        eh = eth_hdr(skb);
1264        eh->h_proto = htons(ETH_P_FCOE);
1265        if (fcoe->ctlr.map_dest)
1266                fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1267        else
1268                /* insert GW address */
1269                memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1270
1271        if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1272                memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1273        else
1274                memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);
1275
1276        hp = (struct fcoe_hdr *)(eh + 1);
1277        memset(hp, 0, sizeof(*hp));
1278        if (FC_FCOE_VER)
1279                FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1280        hp->fcoe_sof = sof;
1281
1282        /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1283        if (lp->seq_offload && fr_max_payload(fp)) {
1284                skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1285                skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1286        } else {
1287                skb_shinfo(skb)->gso_type = 0;
1288                skb_shinfo(skb)->gso_size = 0;
1289        }
1290        /* update tx stats: regardless if LLD fails */
1291        stats = fc_lport_get_stats(lp);
1292        stats->TxFrames++;
1293        stats->TxWords += wlen;
1294
1295        /* send down to lld */
1296        fr_dev(fp) = lp;
1297        if (port->fcoe_pending_queue.qlen)
1298                fcoe_check_wait_queue(lp, skb);
1299        else if (fcoe_start_io(skb))
1300                fcoe_check_wait_queue(lp, skb);
1301
1302        return 0;
1303}
1304
1305/**
1306 * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
1307 * @skb: the skb being completed.
1308 */
1309static void fcoe_percpu_flush_done(struct sk_buff *skb)
1310{
1311        complete(&fcoe_flush_completion);
1312}
1313
1314/**
1315 * fcoe_percpu_receive_thread() - recv thread per cpu
1316 * @arg: ptr to the fcoe per cpu struct
1317 *
1318 * Return: 0 for success
1319 */
1320int fcoe_percpu_receive_thread(void *arg)
1321{
1322        struct fcoe_percpu_s *p = arg;
1323        u32 fr_len;
1324        struct fc_lport *lp;
1325        struct fcoe_rcv_info *fr;
1326        struct fcoe_dev_stats *stats;
1327        struct fc_frame_header *fh;
1328        struct sk_buff *skb;
1329        struct fcoe_crc_eof crc_eof;
1330        struct fc_frame *fp;
1331        u8 *mac = NULL;
1332        struct fcoe_port *port;
1333        struct fcoe_hdr *hp;
1334
1335        set_user_nice(current, -20);
1336
1337        while (!kthread_should_stop()) {
1338
1339                spin_lock_bh(&p->fcoe_rx_list.lock);
1340                while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1341                        set_current_state(TASK_INTERRUPTIBLE);
1342                        spin_unlock_bh(&p->fcoe_rx_list.lock);
1343                        schedule();
1344                        set_current_state(TASK_RUNNING);
1345                        if (kthread_should_stop())
1346                                return 0;
1347                        spin_lock_bh(&p->fcoe_rx_list.lock);
1348                }
1349                spin_unlock_bh(&p->fcoe_rx_list.lock);
1350                fr = fcoe_dev_from_skb(skb);
1351                lp = fr->fr_dev;
1352                if (unlikely(lp == NULL)) {
1353                        if (skb->destructor != fcoe_percpu_flush_done)
1354                                FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1355                        kfree_skb(skb);
1356                        continue;
1357                }
1358
1359                FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1360                                "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1361                                skb->len, skb->data_len,
1362                                skb->head, skb->data, skb_tail_pointer(skb),
1363                                skb_end_pointer(skb), skb->csum,
1364                                skb->dev ? skb->dev->name : "<NULL>");
1365
1366                /*
1367                 * Save source MAC address before discarding header.
1368                 */
1369                port = lport_priv(lp);
1370                if (skb_is_nonlinear(skb))
1371                        skb_linearize(skb);     /* not ideal */
1372                mac = eth_hdr(skb)->h_source;
1373
1374                /*
1375                 * Frame length checks and setting up the header pointers
1376                 * was done in fcoe_rcv already.
1377                 */
1378                hp = (struct fcoe_hdr *) skb_network_header(skb);
1379                fh = (struct fc_frame_header *) skb_transport_header(skb);
1380
1381                stats = fc_lport_get_stats(lp);
1382                if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1383                        if (stats->ErrorFrames < 5)
1384                                printk(KERN_WARNING "fcoe: FCoE version "
1385                                       "mismatch: The frame has "
1386                                       "version %x, but the "
1387                                       "initiator supports version "
1388                                       "%x\n", FC_FCOE_DECAPS_VER(hp),
1389                                       FC_FCOE_VER);
1390                        stats->ErrorFrames++;
1391                        kfree_skb(skb);
1392                        continue;
1393                }
1394
1395                skb_pull(skb, sizeof(struct fcoe_hdr));
1396                fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1397
1398                stats->RxFrames++;
1399                stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1400
1401                fp = (struct fc_frame *)skb;
1402                fc_frame_init(fp);
1403                fr_dev(fp) = lp;
1404                fr_sof(fp) = hp->fcoe_sof;
1405
1406                /* Copy out the CRC and EOF trailer for access */
1407                if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1408                        kfree_skb(skb);
1409                        continue;
1410                }
1411                fr_eof(fp) = crc_eof.fcoe_eof;
1412                fr_crc(fp) = crc_eof.fcoe_crc32;
1413                if (pskb_trim(skb, fr_len)) {
1414                        kfree_skb(skb);
1415                        continue;
1416                }
1417
1418                /*
1419                 * We only check CRC if no offload is available and if it is
1420                 * it's solicited data, in which case, the FCP layer would
1421                 * check it during the copy.
1422                 */
1423                if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1424                        fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1425                else
1426                        fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1427
1428                fh = fc_frame_header_get(fp);
1429                if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1430                    fh->fh_type == FC_TYPE_FCP) {
1431                        fc_exch_recv(lp, fp);
1432                        continue;
1433                }
1434                if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1435                        if (le32_to_cpu(fr_crc(fp)) !=
1436                            ~crc32(~0, skb->data, fr_len)) {
1437                                if (stats->InvalidCRCCount < 5)
1438                                        printk(KERN_WARNING "fcoe: dropping "
1439                                               "frame with CRC error\n");
1440                                stats->InvalidCRCCount++;
1441                                stats->ErrorFrames++;
1442                                fc_frame_free(fp);
1443                                continue;
1444                        }
1445                        fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1446                }
1447                if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1448                    fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
1449                        fc_frame_free(fp);
1450                        continue;
1451                }
1452                fc_exch_recv(lp, fp);
1453        }
1454        return 0;
1455}
1456
1457/**
1458 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1459 * @lp: the fc_lport
1460 *
1461 * This empties the wait_queue, dequeue the head of the wait_queue queue
1462 * and calls fcoe_start_io() for each packet, if all skb have been
1463 * transmitted, return qlen or -1 if a error occurs, then restore
1464 * wait_queue and try again later.
1465 *
1466 * The wait_queue is used when the skb transmit fails. skb will go
1467 * in the wait_queue which will be emptied by the timer function or
1468 * by the next skb transmit.
1469 */
1470static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1471{
1472        struct fcoe_port *port = lport_priv(lp);
1473        int rc;
1474
1475        spin_lock_bh(&port->fcoe_pending_queue.lock);
1476
1477        if (skb)
1478                __skb_queue_tail(&port->fcoe_pending_queue, skb);
1479
1480        if (port->fcoe_pending_queue_active)
1481                goto out;
1482        port->fcoe_pending_queue_active = 1;
1483
1484        while (port->fcoe_pending_queue.qlen) {
1485                /* keep qlen > 0 until fcoe_start_io succeeds */
1486                port->fcoe_pending_queue.qlen++;
1487                skb = __skb_dequeue(&port->fcoe_pending_queue);
1488
1489                spin_unlock_bh(&port->fcoe_pending_queue.lock);
1490                rc = fcoe_start_io(skb);
1491                spin_lock_bh(&port->fcoe_pending_queue.lock);
1492
1493                if (rc) {
1494                        __skb_queue_head(&port->fcoe_pending_queue, skb);
1495                        /* undo temporary increment above */
1496                        port->fcoe_pending_queue.qlen--;
1497                        break;
1498                }
1499                /* undo temporary increment above */
1500                port->fcoe_pending_queue.qlen--;
1501        }
1502
1503        if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1504                lp->qfull = 0;
1505        if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1506                mod_timer(&port->timer, jiffies + 2);
1507        port->fcoe_pending_queue_active = 0;
1508out:
1509        if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1510                lp->qfull = 1;
1511        spin_unlock_bh(&port->fcoe_pending_queue.lock);
1512        return;
1513}
1514
1515/**
1516 * fcoe_dev_setup() - setup link change notification interface
1517 */
1518static void fcoe_dev_setup(void)
1519{
1520        register_netdevice_notifier(&fcoe_notifier);
1521}
1522
1523/**
1524 * fcoe_dev_cleanup() - cleanup link change notification interface
1525 */
1526static void fcoe_dev_cleanup(void)
1527{
1528        unregister_netdevice_notifier(&fcoe_notifier);
1529}
1530
1531/**
1532 * fcoe_device_notification() - netdev event notification callback
1533 * @notifier: context of the notification
1534 * @event: type of event
1535 * @ptr: fixed array for output parsed ifname
1536 *
1537 * This function is called by the ethernet driver in case of link change event
1538 *
1539 * Returns: 0 for success
1540 */
1541static int fcoe_device_notification(struct notifier_block *notifier,
1542                                    ulong event, void *ptr)
1543{
1544        struct fc_lport *lp = NULL;
1545        struct net_device *netdev = ptr;
1546        struct fcoe_interface *fcoe;
1547        struct fcoe_port *port;
1548        struct fcoe_dev_stats *stats;
1549        u32 link_possible = 1;
1550        u32 mfs;
1551        int rc = NOTIFY_OK;
1552
1553        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1554                if (fcoe->netdev == netdev) {
1555                        lp = fcoe->ctlr.lp;
1556                        break;
1557                }
1558        }
1559        if (lp == NULL) {
1560                rc = NOTIFY_DONE;
1561                goto out;
1562        }
1563
1564        switch (event) {
1565        case NETDEV_DOWN:
1566        case NETDEV_GOING_DOWN:
1567                link_possible = 0;
1568                break;
1569        case NETDEV_UP:
1570        case NETDEV_CHANGE:
1571                break;
1572        case NETDEV_CHANGEMTU:
1573                mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1574                                     sizeof(struct fcoe_crc_eof));
1575                if (mfs >= FC_MIN_MAX_FRAME)
1576                        fc_set_mfs(lp, mfs);
1577                break;
1578        case NETDEV_REGISTER:
1579                break;
1580        case NETDEV_UNREGISTER:
1581                list_del(&fcoe->list);
1582                port = lport_priv(fcoe->ctlr.lp);
1583                fcoe_interface_cleanup(fcoe);
1584                schedule_work(&port->destroy_work);
1585                goto out;
1586                break;
1587        default:
1588                FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1589                                "from netdev netlink\n", event);
1590        }
1591        if (link_possible && !fcoe_link_ok(lp))
1592                fcoe_ctlr_link_up(&fcoe->ctlr);
1593        else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1594                stats = fc_lport_get_stats(lp);
1595                stats->LinkFailureCount++;
1596                fcoe_clean_pending_queue(lp);
1597        }
1598out:
1599        return rc;
1600}
1601
1602/**
1603 * fcoe_if_to_netdev() - parse a name buffer to get netdev
1604 * @buffer: incoming buffer to be copied
1605 *
1606 * Returns: NULL or ptr to net_device
1607 */
1608static struct net_device *fcoe_if_to_netdev(const char *buffer)
1609{
1610        char *cp;
1611        char ifname[IFNAMSIZ + 2];
1612
1613        if (buffer) {
1614                strlcpy(ifname, buffer, IFNAMSIZ);
1615                cp = ifname + strlen(ifname);
1616                while (--cp >= ifname && *cp == '\n')
1617                        *cp = '\0';
1618                return dev_get_by_name(&init_net, ifname);
1619        }
1620        return NULL;
1621}
1622
1623/**
1624 * fcoe_destroy() - handles the destroy from sysfs
1625 * @buffer: expected to be an eth if name
1626 * @kp: associated kernel param
1627 *
1628 * Returns: 0 for success
1629 */
1630static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1631{
1632        struct fcoe_interface *fcoe;
1633        struct net_device *netdev;
1634        int rc;
1635
1636        mutex_lock(&fcoe_config_mutex);
1637#ifdef CONFIG_FCOE_MODULE
1638        /*
1639         * Make sure the module has been initialized, and is not about to be
1640         * removed.  Module paramter sysfs files are writable before the
1641         * module_init function is called and after module_exit.
1642         */
1643        if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1644                rc = -ENODEV;
1645                goto out_nodev;
1646        }
1647#endif
1648
1649        netdev = fcoe_if_to_netdev(buffer);
1650        if (!netdev) {
1651                rc = -ENODEV;
1652                goto out_nodev;
1653        }
1654
1655        rtnl_lock();
1656        fcoe = fcoe_hostlist_lookup_port(netdev);
1657        if (!fcoe) {
1658                rtnl_unlock();
1659                rc = -ENODEV;
1660                goto out_putdev;
1661        }
1662        list_del(&fcoe->list);
1663        fcoe_interface_cleanup(fcoe);
1664        rtnl_unlock();
1665        fcoe_if_destroy(fcoe->ctlr.lp);
1666out_putdev:
1667        dev_put(netdev);
1668out_nodev:
1669        mutex_unlock(&fcoe_config_mutex);
1670        return rc;
1671}
1672
/*
 * fcoe_destroy_work() - deferred lport teardown, scheduled from
 * NETDEV_UNREGISTER handling so the destroy runs in process context
 * under fcoe_config_mutex.
 */
static void fcoe_destroy_work(struct work_struct *work)
{
        struct fcoe_port *port;

        port = container_of(work, struct fcoe_port, destroy_work);
        mutex_lock(&fcoe_config_mutex);
        fcoe_if_destroy(port->lport);
        mutex_unlock(&fcoe_config_mutex);
}
1682
1683/**
1684 * fcoe_create() - Handles the create call from sysfs
1685 * @buffer: expected to be an eth if name
1686 * @kp: associated kernel param
1687 *
1688 * Returns: 0 for success
1689 */
1690static int fcoe_create(const char *buffer, struct kernel_param *kp)
1691{
1692        int rc;
1693        struct fcoe_interface *fcoe;
1694        struct fc_lport *lport;
1695        struct net_device *netdev;
1696
1697        mutex_lock(&fcoe_config_mutex);
1698#ifdef CONFIG_FCOE_MODULE
1699        /*
1700         * Make sure the module has been initialized, and is not about to be
1701         * removed.  Module paramter sysfs files are writable before the
1702         * module_init function is called and after module_exit.
1703         */
1704        if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1705                rc = -ENODEV;
1706                goto out_nodev;
1707        }
1708#endif
1709
1710        rtnl_lock();
1711        netdev = fcoe_if_to_netdev(buffer);
1712        if (!netdev) {
1713                rc = -ENODEV;
1714                goto out_nodev;
1715        }
1716
1717        /* look for existing lport */
1718        if (fcoe_hostlist_lookup(netdev)) {
1719                rc = -EEXIST;
1720                goto out_putdev;
1721        }
1722
1723        fcoe = fcoe_interface_create(netdev);
1724        if (!fcoe) {
1725                rc = -ENOMEM;
1726                goto out_putdev;
1727        }
1728
1729        lport = fcoe_if_create(fcoe, &netdev->dev);
1730        if (IS_ERR(lport)) {
1731                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1732                       netdev->name);
1733                rc = -EIO;
1734                fcoe_interface_cleanup(fcoe);
1735                goto out_free;
1736        }
1737
1738        /* Make this the "master" N_Port */
1739        fcoe->ctlr.lp = lport;
1740
1741        /* add to lports list */
1742        fcoe_hostlist_add(lport);
1743
1744        /* start FIP Discovery and FLOGI */
1745        lport->boot_time = jiffies;
1746        fc_fabric_login(lport);
1747        if (!fcoe_link_ok(lport))
1748                fcoe_ctlr_link_up(&fcoe->ctlr);
1749
1750        rc = 0;
1751out_free:
1752        /*
1753         * Release from init in fcoe_interface_create(), on success lport
1754         * should be holding a reference taken in fcoe_if_create().
1755         */
1756        fcoe_interface_put(fcoe);
1757out_putdev:
1758        dev_put(netdev);
1759out_nodev:
1760        rtnl_unlock();
1761        mutex_unlock(&fcoe_config_mutex);
1762        return rc;
1763}
1764
1765module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1766__MODULE_PARM_TYPE(create, "string");
1767MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
1768module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1769__MODULE_PARM_TYPE(destroy, "string");
1770MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
1771
1772/**
1773 * fcoe_link_ok() - Check if link is ok for the fc_lport
1774 * @lp: ptr to the fc_lport
1775 *
1776 * Any permanently-disqualifying conditions have been previously checked.
1777 * This also updates the speed setting, which may change with link for 100/1000.
1778 *
1779 * This function should probably be checking for PAUSE support at some point
1780 * in the future. Currently Per-priority-pause is not determinable using
1781 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1782 *
1783 * Returns: 0 if link is OK for use by FCoE.
1784 *
1785 */
1786int fcoe_link_ok(struct fc_lport *lp)
1787{
1788        struct fcoe_port *port = lport_priv(lp);
1789        struct net_device *dev = port->fcoe->netdev;
1790        struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1791
1792        if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
1793            (!dev_ethtool_get_settings(dev, &ecmd))) {
1794                lp->link_supported_speeds &=
1795                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1796                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1797                                      SUPPORTED_1000baseT_Full))
1798                        lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1799                if (ecmd.supported & SUPPORTED_10000baseT_Full)
1800                        lp->link_supported_speeds |=
1801                                FC_PORTSPEED_10GBIT;
1802                if (ecmd.speed == SPEED_1000)
1803                        lp->link_speed = FC_PORTSPEED_1GBIT;
1804                if (ecmd.speed == SPEED_10000)
1805                        lp->link_speed = FC_PORTSPEED_10GBIT;
1806
1807                return 0;
1808        }
1809        return -1;
1810}
1811
1812/**
1813 * fcoe_percpu_clean() - Clear the pending skbs for an lport
1814 * @lp: the fc_lport
1815 *
1816 * Must be called with fcoe_create_mutex held to single-thread completion.
1817 *
1818 * This flushes the pending skbs by adding a new skb to each queue and
1819 * waiting until they are all freed.  This assures us that not only are
1820 * there no packets that will be handled by the lport, but also that any
1821 * threads already handling packet have returned.
1822 */
1823void fcoe_percpu_clean(struct fc_lport *lp)
1824{
1825        struct fcoe_percpu_s *pp;
1826        struct fcoe_rcv_info *fr;
1827        struct sk_buff_head *list;
1828        struct sk_buff *skb, *next;
1829        struct sk_buff *head;
1830        unsigned int cpu;
1831
1832        for_each_possible_cpu(cpu) {
1833                pp = &per_cpu(fcoe_percpu, cpu);
1834                spin_lock_bh(&pp->fcoe_rx_list.lock);
1835                list = &pp->fcoe_rx_list;
1836                head = list->next;
1837                for (skb = head; skb != (struct sk_buff *)list;
1838                     skb = next) {
1839                        next = skb->next;
1840                        fr = fcoe_dev_from_skb(skb);
1841                        if (fr->fr_dev == lp) {
1842                                __skb_unlink(skb, list);
1843                                kfree_skb(skb);
1844                        }
1845                }
1846
1847                if (!pp->thread || !cpu_online(cpu)) {
1848                        spin_unlock_bh(&pp->fcoe_rx_list.lock);
1849                        continue;
1850                }
1851
1852                skb = dev_alloc_skb(0);
1853                if (!skb) {
1854                        spin_unlock_bh(&pp->fcoe_rx_list.lock);
1855                        continue;
1856                }
1857                skb->destructor = fcoe_percpu_flush_done;
1858
1859                __skb_queue_tail(&pp->fcoe_rx_list, skb);
1860                if (pp->fcoe_rx_list.qlen == 1)
1861                        wake_up_process(pp->thread);
1862                spin_unlock_bh(&pp->fcoe_rx_list.lock);
1863
1864                wait_for_completion(&fcoe_flush_completion);
1865        }
1866}
1867
1868/**
1869 * fcoe_clean_pending_queue() - Dequeue a skb and free it
1870 * @lp: the corresponding fc_lport
1871 *
1872 * Returns: none
1873 */
1874void fcoe_clean_pending_queue(struct fc_lport *lp)
1875{
1876        struct fcoe_port  *port = lport_priv(lp);
1877        struct sk_buff *skb;
1878
1879        spin_lock_bh(&port->fcoe_pending_queue.lock);
1880        while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
1881                spin_unlock_bh(&port->fcoe_pending_queue.lock);
1882                kfree_skb(skb);
1883                spin_lock_bh(&port->fcoe_pending_queue.lock);
1884        }
1885        spin_unlock_bh(&port->fcoe_pending_queue.lock);
1886}
1887
1888/**
1889 * fcoe_reset() - Resets the fcoe
1890 * @shost: shost the reset is from
1891 *
1892 * Returns: always 0
1893 */
1894int fcoe_reset(struct Scsi_Host *shost)
1895{
1896        struct fc_lport *lport = shost_priv(shost);
1897        fc_lport_reset(lport);
1898        return 0;
1899}
1900
1901/**
1902 * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
1903 * @dev: this is currently ptr to net_device
1904 *
1905 * Returns: NULL or the located fcoe_port
1906 * Locking: must be called with the RNL mutex held
1907 */
1908static struct fcoe_interface *
1909fcoe_hostlist_lookup_port(const struct net_device *dev)
1910{
1911        struct fcoe_interface *fcoe;
1912
1913        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1914                if (fcoe->netdev == dev)
1915                        return fcoe;
1916        }
1917        return NULL;
1918}
1919
1920/**
1921 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1922 * @netdev: ptr to net_device
1923 *
1924 * Returns: 0 for success
1925 * Locking: must be called with the RTNL mutex held
1926 */
1927static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1928{
1929        struct fcoe_interface *fcoe;
1930
1931        fcoe = fcoe_hostlist_lookup_port(netdev);
1932        return (fcoe) ? fcoe->ctlr.lp : NULL;
1933}
1934
1935/**
1936 * fcoe_hostlist_add() - Add a lport to lports list
1937 * @lp: ptr to the fc_lport to be added
1938 *
1939 * Returns: 0 for success
1940 * Locking: must be called with the RTNL mutex held
1941 */
1942static int fcoe_hostlist_add(const struct fc_lport *lport)
1943{
1944        struct fcoe_interface *fcoe;
1945        struct fcoe_port *port;
1946
1947        fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
1948        if (!fcoe) {
1949                port = lport_priv(lport);
1950                fcoe = port->fcoe;
1951                list_add_tail(&fcoe->list, &fcoe_hostlist);
1952        }
1953        return 0;
1954}
1955
1956/**
1957 * fcoe_init() - fcoe module loading initialization
1958 *
1959 * Returns 0 on success, negative on failure
1960 */
1961static int __init fcoe_init(void)
1962{
1963        unsigned int cpu;
1964        int rc = 0;
1965        struct fcoe_percpu_s *p;
1966
1967        mutex_lock(&fcoe_config_mutex);
1968
1969        for_each_possible_cpu(cpu) {
1970                p = &per_cpu(fcoe_percpu, cpu);
1971                skb_queue_head_init(&p->fcoe_rx_list);
1972        }
1973
1974        for_each_online_cpu(cpu)
1975                fcoe_percpu_thread_create(cpu);
1976
1977        /* Initialize per CPU interrupt thread */
1978        rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1979        if (rc)
1980                goto out_free;
1981
1982        /* Setup link change notification */
1983        fcoe_dev_setup();
1984
1985        rc = fcoe_if_init();
1986        if (rc)
1987                goto out_free;
1988
1989        mutex_unlock(&fcoe_config_mutex);
1990        return 0;
1991
1992out_free:
1993        for_each_online_cpu(cpu) {
1994                fcoe_percpu_thread_destroy(cpu);
1995        }
1996        mutex_unlock(&fcoe_config_mutex);
1997        return rc;
1998}
1999module_init(fcoe_init);
2000
2001/**
2002 * fcoe_exit() - fcoe module unloading cleanup
2003 *
2004 * Returns 0 on success, negative on failure
2005 */
2006static void __exit fcoe_exit(void)
2007{
2008        unsigned int cpu;
2009        struct fcoe_interface *fcoe, *tmp;
2010        struct fcoe_port *port;
2011
2012        mutex_lock(&fcoe_config_mutex);
2013
2014        fcoe_dev_cleanup();
2015
2016        /* releases the associated fcoe hosts */
2017        rtnl_lock();
2018        list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2019                list_del(&fcoe->list);
2020                port = lport_priv(fcoe->ctlr.lp);
2021                fcoe_interface_cleanup(fcoe);
2022                schedule_work(&port->destroy_work);
2023        }
2024        rtnl_unlock();
2025
2026        unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2027
2028        for_each_online_cpu(cpu)
2029                fcoe_percpu_thread_destroy(cpu);
2030
2031        mutex_unlock(&fcoe_config_mutex);
2032
2033        /* flush any asyncronous interface destroys,
2034         * this should happen after the netdev notifier is unregistered */
2035        flush_scheduled_work();
2036
2037        /* detach from scsi transport
2038         * must happen after all destroys are done, therefor after the flush */
2039        fcoe_if_exit();
2040}
2041module_exit(fcoe_exit);
2042