/* linux/drivers/nvme/target/fcloop.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
   4 */
   5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   6#include <linux/module.h>
   7#include <linux/parser.h>
   8#include <uapi/scsi/fc/fc_fs.h>
   9
  10#include "../host/nvme.h"
  11#include "../target/nvmet.h"
  12#include <linux/nvme-fc-driver.h>
  13#include <linux/nvme-fc.h>
  14
  15
/*
 * Bit flags recording which options were seen while parsing a port
 * configuration string; accumulated into fcloop_ctrl_options.mask.
 */
enum {
	NVMF_OPT_ERR		= 0,		/* unknown/unmatched option */
	NVMF_OPT_WWNN		= 1 << 0,	/* world-wide node name */
	NVMF_OPT_WWPN		= 1 << 1,	/* world-wide port name */
	NVMF_OPT_ROLES		= 1 << 2,	/* FC port roles */
	NVMF_OPT_FCADDR		= 1 << 3,	/* FC address (port id) */
	NVMF_OPT_LPWWNN		= 1 << 4,	/* local port node name */
	NVMF_OPT_LPWWPN		= 1 << 5,	/* local port port name */
};
  25
/*
 * Parsed representation of a port creation/deletion option string.
 * @mask holds NVMF_OPT_* bits for the fields actually supplied.
 */
struct fcloop_ctrl_options {
	int			mask;		/* NVMF_OPT_* bits seen */
	u64			wwnn;		/* node name */
	u64			wwpn;		/* port name */
	u32			roles;		/* FC port roles */
	u32			fcaddr;		/* FC address */
	u64			lpwwnn;		/* local port node name */
	u64			lpwwpn;		/* local port port name */
};
  35
/* Token table for match_token(); NVMF_OPT_ERR terminates the table. */
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
  45
  46static int
  47fcloop_parse_options(struct fcloop_ctrl_options *opts,
  48                const char *buf)
  49{
  50        substring_t args[MAX_OPT_ARGS];
  51        char *options, *o, *p;
  52        int token, ret = 0;
  53        u64 token64;
  54
  55        options = o = kstrdup(buf, GFP_KERNEL);
  56        if (!options)
  57                return -ENOMEM;
  58
  59        while ((p = strsep(&o, ",\n")) != NULL) {
  60                if (!*p)
  61                        continue;
  62
  63                token = match_token(p, opt_tokens, args);
  64                opts->mask |= token;
  65                switch (token) {
  66                case NVMF_OPT_WWNN:
  67                        if (match_u64(args, &token64)) {
  68                                ret = -EINVAL;
  69                                goto out_free_options;
  70                        }
  71                        opts->wwnn = token64;
  72                        break;
  73                case NVMF_OPT_WWPN:
  74                        if (match_u64(args, &token64)) {
  75                                ret = -EINVAL;
  76                                goto out_free_options;
  77                        }
  78                        opts->wwpn = token64;
  79                        break;
  80                case NVMF_OPT_ROLES:
  81                        if (match_int(args, &token)) {
  82                                ret = -EINVAL;
  83                                goto out_free_options;
  84                        }
  85                        opts->roles = token;
  86                        break;
  87                case NVMF_OPT_FCADDR:
  88                        if (match_hex(args, &token)) {
  89                                ret = -EINVAL;
  90                                goto out_free_options;
  91                        }
  92                        opts->fcaddr = token;
  93                        break;
  94                case NVMF_OPT_LPWWNN:
  95                        if (match_u64(args, &token64)) {
  96                                ret = -EINVAL;
  97                                goto out_free_options;
  98                        }
  99                        opts->lpwwnn = token64;
 100                        break;
 101                case NVMF_OPT_LPWWPN:
 102                        if (match_u64(args, &token64)) {
 103                                ret = -EINVAL;
 104                                goto out_free_options;
 105                        }
 106                        opts->lpwwpn = token64;
 107                        break;
 108                default:
 109                        pr_warn("unknown parameter or missing value '%s'\n", p);
 110                        ret = -EINVAL;
 111                        goto out_free_options;
 112                }
 113        }
 114
 115out_free_options:
 116        kfree(options);
 117        return ret;
 118}
 119
 120
 121static int
 122fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
 123                const char *buf)
 124{
 125        substring_t args[MAX_OPT_ARGS];
 126        char *options, *o, *p;
 127        int token, ret = 0;
 128        u64 token64;
 129
 130        *nname = -1;
 131        *pname = -1;
 132
 133        options = o = kstrdup(buf, GFP_KERNEL);
 134        if (!options)
 135                return -ENOMEM;
 136
 137        while ((p = strsep(&o, ",\n")) != NULL) {
 138                if (!*p)
 139                        continue;
 140
 141                token = match_token(p, opt_tokens, args);
 142                switch (token) {
 143                case NVMF_OPT_WWNN:
 144                        if (match_u64(args, &token64)) {
 145                                ret = -EINVAL;
 146                                goto out_free_options;
 147                        }
 148                        *nname = token64;
 149                        break;
 150                case NVMF_OPT_WWPN:
 151                        if (match_u64(args, &token64)) {
 152                                ret = -EINVAL;
 153                                goto out_free_options;
 154                        }
 155                        *pname = token64;
 156                        break;
 157                default:
 158                        pr_warn("unknown parameter or missing value '%s'\n", p);
 159                        ret = -EINVAL;
 160                        goto out_free_options;
 161                }
 162        }
 163
 164out_free_options:
 165        kfree(options);
 166
 167        if (!ret) {
 168                if (*nname == -1)
 169                        return -EINVAL;
 170                if (*pname == -1)
 171                        return -EINVAL;
 172        }
 173
 174        return ret;
 175}
 176
 177
 178#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
 179
 180#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
 181                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
 182
 183#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
 184
 185
 186static DEFINE_SPINLOCK(fcloop_lock);
 187static LIST_HEAD(fcloop_lports);
 188static LIST_HEAD(fcloop_nports);
 189
/* One emulated FC local (host-side) port; linked on fcloop_lports. */
struct fcloop_lport {
	struct nvme_fc_local_port *localport;	/* transport's local port */
	struct list_head lport_list;		/* entry on fcloop_lports */
	struct completion unreg_done;		/* completed by localport_delete */
};
 195
/* Private data embedded in nvme_fc_local_port (local_priv_sz); back-pointer. */
struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};
 199
/* Initiator-side view of a remote port, paired with a target port. */
struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;	/* owning remote port */
	struct nvmet_fc_target_port *targetport;/* looped-back target, may be NULL */
	struct fcloop_nport *nport;		/* shared port identity */
	struct fcloop_lport *lport;		/* associated local port */
};
 206
/* Target-side view of a port, paired with an initiator remote port. */
struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;/* owning target port */
	struct nvme_fc_remote_port *remoteport;	/* looped-back remote, may be NULL */
	struct fcloop_nport *nport;		/* shared port identity */
	struct fcloop_lport *lport;		/* associated local port */
};
 213
/*
 * Refcounted port identity shared by the rport/tport pair; linked on
 * fcloop_nports and freed (and unlinked) when the last ref drops.
 */
struct fcloop_nport {
	struct fcloop_rport *rport;	/* initiator side, may be NULL */
	struct fcloop_tport *tport;	/* target side, may be NULL */
	struct fcloop_lport *lport;	/* associated local port */
	struct list_head nport_list;	/* entry on fcloop_nports */
	struct kref ref;		/* see fcloop_nport_get/put */
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
 225
/* Per-LS-request context, embedded via fctemplate.lsrqst_priv_sz. */
struct fcloop_lsreq {
	struct fcloop_tport		*tport;		/* NULL if refused locally */
	struct nvmefc_ls_req		*lsreq;		/* originating host request */
	struct work_struct		work;		/* completion-to-host work */
	struct nvmefc_tgt_ls_req	tgt_ls_req;	/* handed to nvmet-fc */
	int				status;		/* result reported to host */
};
 233
/* Work context for a simulated RSCN delivered to the initiator. */
struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;	/* runs fcloop_tgt_rscn_work */
};
 238
/* Initiator-side state machine for an FCP I/O (fcloop_fcpreq.inistate). */
enum {
	INI_IO_START		= 0,	/* queued, recv work not yet run */
	INI_IO_ACTIVE		= 1,	/* handed to the target side */
	INI_IO_ABORTED		= 2,	/* host abort posted */
	INI_IO_COMPLETED	= 3,	/* target completion done */
};
 245
/*
 * Target-side context for one FCP I/O.  Refcounted: the initial ref is
 * the "original io" ref dropped in fcloop_call_host_done(); an abort
 * takes an extra ref while its work item is pending.
 */
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;	/* NULLed once aborted */
	spinlock_t			reqlock;	/* guards state below */
	u16				status;		/* final NVMe status */
	u32				inistate;	/* INI_IO_* */
	bool				active;		/* fcp_op in progress */
	bool				aborted;	/* target-side abort seen */
	struct kref			ref;
	struct work_struct		fcp_rcv_work;	/* deliver cmd to target */
	struct work_struct		abort_rcv_work;	/* deliver abort to target */
	struct work_struct		tio_done_work;	/* complete back to host */
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;	/* handed to nvmet-fc */
};
 260
/* Initiator-side per-request private data (fctemplate.fcprqst_priv_sz). */
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;	/* NULL once completed */
	spinlock_t			inilock;	/* guards tfcp_req */
};
 266
/* Map an embedded nvmefc_tgt_ls_req back to its fcloop_lsreq container. */
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}
 272
/* Map an embedded nvmefc_tgt_fcp_req back to its fcloop_fcpreq container. */
static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
 278
 279
/*
 * create_queue template op: no real hardware queues exist, so just hand
 * back the localport as the opaque queue handle.  Always succeeds.
 */
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}
 288
/* delete_queue template op: nothing to tear down for the fake queue. */
static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
 294
 295
 296/*
 297 * Transmit of LS RSP done (e.g. buffers all set). call back up
 298 * initiator "done" flows.
 299 */
/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	/*
	 * tport == NULL means the LS was refused locally in fcloop_ls_req
	 * (no target port); otherwise only complete the request if the
	 * remote port still exists.
	 */
	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}
 311
/*
 * ls_req template op: loop an LS request from the host side to the
 * nvmet-fc target side.  If no target port is connected, complete the
 * request asynchronously with -ECONNREFUSED via the done work item.
 */
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	/* per-request context allocated by the transport (lsrqst_priv_sz) */
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		/* no target side: refuse, completing from work context */
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
 338
 339static int
 340fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
 341                        struct nvmefc_tgt_ls_req *tgt_lsreq)
 342{
 343        struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
 344        struct nvmefc_ls_req *lsreq = tls_req->lsreq;
 345
 346        memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
 347                ((lsreq->rsplen < tgt_lsreq->rsplen) ?
 348                                lsreq->rsplen : tgt_lsreq->rsplen));
 349        tgt_lsreq->done(tgt_lsreq);
 350
 351        schedule_work(&tls_req->work);
 352
 353        return 0;
 354}
 355
 356/*
 357 * Simulate reception of RSCN and converting it to a initiator transport
 358 * call to rescan a remote port.
 359 */
/*
 * Simulate reception of RSCN and converting it to a initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	/* only rescan if the paired remote port still exists */
	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	/* context was allocated by fcloop_tgt_discovery_evt */
	kfree(tgt_rscn);
}
 371
 372static void
 373fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
 374{
 375        struct fcloop_rscn *tgt_rscn;
 376
 377        tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
 378        if (!tgt_rscn)
 379                return;
 380
 381        tgt_rscn->tport = tgtport->private;
 382        INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
 383
 384        schedule_work(&tgt_rscn->work);
 385}
 386
/* kref release callback: free the target-side FCP request context. */
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}
 395
/* Drop a reference; frees the context when the last ref goes away. */
static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}
 401
/* Take a reference unless the count already hit zero; returns nonzero on success. */
static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}
 407
/*
 * Complete an FCP request back to the host (if it still exists) and
 * drop the original io reference on the target-side context.
 * @fcpreq may be NULL when the host side already aborted the request.
 */
static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		/* sever the link under inilock so a racing abort can't use it */
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
 427
 428static void
 429fcloop_fcp_recv_work(struct work_struct *work)
 430{
 431        struct fcloop_fcpreq *tfcp_req =
 432                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
 433        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
 434        int ret = 0;
 435        bool aborted = false;
 436
 437        spin_lock_irq(&tfcp_req->reqlock);
 438        switch (tfcp_req->inistate) {
 439        case INI_IO_START:
 440                tfcp_req->inistate = INI_IO_ACTIVE;
 441                break;
 442        case INI_IO_ABORTED:
 443                aborted = true;
 444                break;
 445        default:
 446                spin_unlock_irq(&tfcp_req->reqlock);
 447                WARN_ON(1);
 448                return;
 449        }
 450        spin_unlock_irq(&tfcp_req->reqlock);
 451
 452        if (unlikely(aborted))
 453                ret = -ECANCELED;
 454        else
 455                ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
 456                                &tfcp_req->tgt_fcp_req,
 457                                fcpreq->cmdaddr, fcpreq->cmdlen);
 458        if (ret)
 459                fcloop_call_host_done(fcpreq, tfcp_req, ret);
 460
 461        return;
 462}
 463
/*
 * Work item that delivers a host abort to the target side.  If the I/O
 * already completed, just drop the extra abort reference; otherwise
 * notify nvmet-fc, sever the host linkage, and complete with -ECANCELED.
 */
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	/* tell the target side the exchange is being aborted */
	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break the link so later target ops see the host side gone */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
 504
 505/*
 506 * FCP IO operation done by target completion.
 507 * call back up initiator "done" flows.
 508 */
/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	/* snapshot the host request and mark the io completed */
	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
 523
 524
/*
 * fcp_io template op: allocate the target-side context for a host FCP
 * request, link the two sides together, and schedule the recv work to
 * deliver the command to nvmet-fc.  Returns -ECONNREFUSED when no
 * target port is attached, -ENOMEM on allocation failure.
 */
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	/* may be called from atomic context, hence GFP_ATOMIC */
	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	/* initial ref is the "original io" ref, dropped in call_host_done */
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
 559
/*
 * Copy @length bytes between the target-side sg list (@data_sg) and the
 * host-side sg list (@io_sg), starting @offset bytes into @io_sg.
 * Direction is chosen by @op: WRITEDATA copies io -> data, otherwise
 * data -> io.
 *
 * NOTE(review): no bounds checks — assumes offset+length fits within
 * both lists (guaranteed by the transfer lengths negotiated by the
 * callers); confirm before reusing elsewhere.
 */
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* skip 'offset' bytes of the io list, advancing across entries */
	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	/* copy in chunks bounded by the current entry of each list */
	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
 613
/*
 * fcp_op template op: execute one target-requested data/response
 * movement against the looped-back host request.  Rejects re-entrant
 * calls (-EALREADY), short-circuits ops posted after a target abort,
 * and otherwise copies data and/or the response, then calls done().
 */
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	/* atomically claim the op and sample the abort state */
	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			/* copy no more than the smaller response buffer */
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	/* release the op so a subsequent fcp_op may run */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
 706
/*
 * fcp_abort template op (target side): flag the io as aborted so that
 * any subsequently-posted fcp_op is short-circuited, and record an
 * internal error status for the eventual host completion.
 */
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
 730
/*
 * fcp_req_release template op: the target side is done with the
 * exchange; defer host completion to the tio_done work item.
 */
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}
 739
/* ls_abort template op: LS requests complete inline here; nothing to abort. */
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}
 746
/*
 * fcp_abort template op (host side): if the io is still linked to a
 * target-side context, mark it INI_IO_ABORTED and schedule the abort
 * work; the extra reference taken here is dropped either by the work
 * item or immediately when the io already completed.
 */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	/* grab a ref on the target-side context, if it still exists */
	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else  {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}
 795
/* kref release callback: unlink the nport from fcloop_nports and free it. */
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}
 809
/* Drop an nport reference; frees/unlinks on last put. */
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}
 815
/* Take an nport reference unless already freed; nonzero on success. */
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
 821
/*
 * localport_delete template op: signal __wait_localport_unreg() that
 * the transport has finished tearing the local port down.
 */
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}
 831
/* remoteport_delete template op: drop the rport's nport reference. */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}
 839
/* targetport_delete template op: drop the tport's nport reference. */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
 847
 848#define FCLOOP_HW_QUEUES                4
 849#define FCLOOP_SGL_SEGS                 256
 850#define FCLOOP_DMABOUND_4G              0xFFFFFFFF
 851
/* Host-side (initiator) transport template registered with nvme-fc. */
static struct nvme_fc_port_template fctemplate = {
	.module			= THIS_MODULE,
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
 872
/* Target-side transport template registered with nvmet-fc. */
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
 889
/*
 * sysfs store handler: parse a wwnn=/wwpn= (and optional roles=/fcaddr=)
 * string from @buf, register a local FC port with nvme-fc, and track it
 * on fcloop_lports.  Returns @count on success or a negative errno.
 */
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}
 949
 950
/*
 * Remove @lport from fcloop_lports.  All callers hold fcloop_lock.
 */
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}
 956
/*
 * Unregister @lport from the nvme-fc transport, wait for the transport's
 * delete callback to signal lport->unreg_done, then free the lport.
 * Returns the status of the unregister call.
 */
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	/*
	 * NOTE(review): we wait even when unregister returned an error --
	 * presumably the completion still fires in that case; confirm
	 * against nvme_fc_unregister_localport()'s contract.
	 */
	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}
 972
 973
 974static ssize_t
 975fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
 976                const char *buf, size_t count)
 977{
 978        struct fcloop_lport *tlport, *lport = NULL;
 979        u64 nodename, portname;
 980        unsigned long flags;
 981        int ret;
 982
 983        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
 984        if (ret)
 985                return ret;
 986
 987        spin_lock_irqsave(&fcloop_lock, flags);
 988
 989        list_for_each_entry(tlport, &fcloop_lports, lport_list) {
 990                if (tlport->localport->node_name == nodename &&
 991                    tlport->localport->port_name == portname) {
 992                        lport = tlport;
 993                        __unlink_local_port(lport);
 994                        break;
 995                }
 996        }
 997        spin_unlock_irqrestore(&fcloop_lock, flags);
 998
 999        if (!lport)
1000                return -ENOENT;
1001
1002        ret = __wait_localport_unreg(lport);
1003
1004        return ret ? ret : count;
1005}
1006
/*
 * Parse @buf and return an nport suitable for attaching a remote port
 * (@remoteport true, requires RPORT_OPTS incl. lpwwnn/lpwwpn naming an
 * existing local port) or a target port (@remoteport false, TGTPORT_OPTS).
 *
 * If an nport with the same wwnn/wwpn already exists and its requested
 * side is still free, that nport is returned with an extra reference
 * and the new allocation is discarded; otherwise the fresh nport is
 * linked onto fcloop_nports.  Returns NULL on any failure.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		/* the new nport's wwnn/wwpn must not clash with a local port */
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		/* remember the local port a remote port would loop back to */
		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		/* a remote port must name an existing local port */
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			/* requested side of this nport is already occupied */
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;	/* report failure to caller */
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			/*
			 * NOTE(review): these updates happen after dropping
			 * fcloop_lock; presumably serialized by the sysfs
			 * write path -- confirm against concurrent writers.
			 */
			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			/* reuse existing nport; discard the new allocation */
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	/* NULL on failure, or the pre-existing nport when one was reused */
	return nport;
}
1100
/*
 * sysfs "add_remote_port": create (or reuse) an nport and register its
 * remote-port side with the nvme-fc host transport, cross-linking it
 * with any target port already present on the same nport.
 */
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		/* drop the reference taken by fcloop_alloc_nport() */
		fcloop_nport_put(nport);
		return ret;
	}

	/*
	 * success -- cross-link the rport with its nport/lport and any
	 * target port already attached.
	 * NOTE(review): these linkage updates are done without holding
	 * fcloop_lock; presumably serialized by the sysfs write path --
	 * confirm against concurrent add/del writers.
	 */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}
1142
1143
1144static struct fcloop_rport *
1145__unlink_remote_port(struct fcloop_nport *nport)
1146{
1147        struct fcloop_rport *rport = nport->rport;
1148
1149        if (rport && nport->tport)
1150                nport->tport->remoteport = NULL;
1151        nport->rport = NULL;
1152
1153        return rport;
1154}
1155
1156static int
1157__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1158{
1159        if (!rport)
1160                return -EALREADY;
1161
1162        return nvme_fc_unregister_remoteport(rport->remoteport);
1163}
1164
1165static ssize_t
1166fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1167                const char *buf, size_t count)
1168{
1169        struct fcloop_nport *nport = NULL, *tmpport;
1170        static struct fcloop_rport *rport;
1171        u64 nodename, portname;
1172        unsigned long flags;
1173        int ret;
1174
1175        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1176        if (ret)
1177                return ret;
1178
1179        spin_lock_irqsave(&fcloop_lock, flags);
1180
1181        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1182                if (tmpport->node_name == nodename &&
1183                    tmpport->port_name == portname && tmpport->rport) {
1184                        nport = tmpport;
1185                        rport = __unlink_remote_port(nport);
1186                        break;
1187                }
1188        }
1189
1190        spin_unlock_irqrestore(&fcloop_lock, flags);
1191
1192        if (!nport)
1193                return -ENOENT;
1194
1195        ret = __remoteport_unreg(nport, rport);
1196
1197        return ret ? ret : count;
1198}
1199
1200static ssize_t
1201fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1202                const char *buf, size_t count)
1203{
1204        struct nvmet_fc_target_port *targetport;
1205        struct fcloop_nport *nport;
1206        struct fcloop_tport *tport;
1207        struct nvmet_fc_port_info tinfo;
1208        int ret;
1209
1210        nport = fcloop_alloc_nport(buf, count, false);
1211        if (!nport)
1212                return -EIO;
1213
1214        tinfo.node_name = nport->node_name;
1215        tinfo.port_name = nport->port_name;
1216        tinfo.port_id = nport->port_id;
1217
1218        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1219                                                &targetport);
1220        if (ret) {
1221                fcloop_nport_put(nport);
1222                return ret;
1223        }
1224
1225        /* success */
1226        tport = targetport->private;
1227        tport->targetport = targetport;
1228        tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
1229        if (nport->rport)
1230                nport->rport->targetport = targetport;
1231        tport->nport = nport;
1232        tport->lport = nport->lport;
1233        nport->tport = tport;
1234
1235        return count;
1236}
1237
1238
1239static struct fcloop_tport *
1240__unlink_target_port(struct fcloop_nport *nport)
1241{
1242        struct fcloop_tport *tport = nport->tport;
1243
1244        if (tport && nport->rport)
1245                nport->rport->targetport = NULL;
1246        nport->tport = NULL;
1247
1248        return tport;
1249}
1250
1251static int
1252__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1253{
1254        if (!tport)
1255                return -EALREADY;
1256
1257        return nvmet_fc_unregister_targetport(tport->targetport);
1258}
1259
1260static ssize_t
1261fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1262                const char *buf, size_t count)
1263{
1264        struct fcloop_nport *nport = NULL, *tmpport;
1265        struct fcloop_tport *tport = NULL;
1266        u64 nodename, portname;
1267        unsigned long flags;
1268        int ret;
1269
1270        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1271        if (ret)
1272                return ret;
1273
1274        spin_lock_irqsave(&fcloop_lock, flags);
1275
1276        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1277                if (tmpport->node_name == nodename &&
1278                    tmpport->port_name == portname && tmpport->tport) {
1279                        nport = tmpport;
1280                        tport = __unlink_target_port(nport);
1281                        break;
1282                }
1283        }
1284
1285        spin_unlock_irqrestore(&fcloop_lock, flags);
1286
1287        if (!nport)
1288                return -ENOENT;
1289
1290        ret = __targetport_unreg(nport, tport);
1291
1292        return ret ? ret : count;
1293}
1294
1295
/* write-only (0200) sysfs controls, surfaced on the "ctl" device */
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1302
1303static struct attribute *fcloop_dev_attrs[] = {
1304        &dev_attr_add_local_port.attr,
1305        &dev_attr_del_local_port.attr,
1306        &dev_attr_add_remote_port.attr,
1307        &dev_attr_del_remote_port.attr,
1308        &dev_attr_add_target_port.attr,
1309        &dev_attr_del_target_port.attr,
1310        NULL
1311};
1312
1313static struct attribute_group fclopp_dev_attrs_group = {
1314        .attrs          = fcloop_dev_attrs,
1315};
1316
1317static const struct attribute_group *fcloop_dev_attr_groups[] = {
1318        &fclopp_dev_attrs_group,
1319        NULL,
1320};
1321
/* sysfs class and the single "ctl" device, both created in fcloop_init() */
static struct class *fcloop_class;
static struct device *fcloop_device;
1324
1325
1326static int __init fcloop_init(void)
1327{
1328        int ret;
1329
1330        fcloop_class = class_create(THIS_MODULE, "fcloop");
1331        if (IS_ERR(fcloop_class)) {
1332                pr_err("couldn't register class fcloop\n");
1333                ret = PTR_ERR(fcloop_class);
1334                return ret;
1335        }
1336
1337        fcloop_device = device_create_with_groups(
1338                                fcloop_class, NULL, MKDEV(0, 0), NULL,
1339                                fcloop_dev_attr_groups, "ctl");
1340        if (IS_ERR(fcloop_device)) {
1341                pr_err("couldn't create ctl device!\n");
1342                ret = PTR_ERR(fcloop_device);
1343                goto out_destroy_class;
1344        }
1345
1346        get_device(fcloop_device);
1347
1348        return 0;
1349
1350out_destroy_class:
1351        class_destroy(fcloop_class);
1352        return ret;
1353}
1354
/*
 * Module unload: tear down every nport (target and remote sides) and
 * every lport, then remove the sysfs device and class.
 */
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		/* detach both sides while the lock is held */
		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		/*
		 * Drop the lock around the unregister calls -- presumably
		 * because they can block or re-enter fcloop; confirm
		 * against the transport callbacks.
		 */
		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		/* __wait_localport_unreg() sleeps -- drop the lock first */
		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* drop the reference taken in fcloop_init() */
	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
1412
/* module entry/exit hooks and license metadata */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");
1417