linux/drivers/target/target_core_tpg.c
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*      core_clear_initiator_node_from_tpg():
 *
 *      Revoke a node ACL's access to every LUN it is currently mapped to
 *      within the given TPG.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entry's device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}


/*      __core_tpg_get_initiator_node_acl():
 *
 *      tpg->acl_node_lock must be held when calling (the callers in this
 *      file take it with spin_lock_irq()).
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Locked wrapper around __core_tpg_get_initiator_node_acl().
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        spin_unlock_irq(&tpg->acl_node_lock);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
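
/*
 * Usage sketch (illustrative only, not a kernel API contract): a fabric
 * module would typically look up an ACL like this from its login path.
 * "se_tpg" and "initiator_iqn" are hypothetical local variables.
 *
 *      struct se_node_acl *nacl;
 *
 *      nacl = core_tpg_get_initiator_node_acl(se_tpg, initiator_iqn);
 *      if (!nacl)
 *              ... no explicit ACL configured for this initiator ...
 */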

/*      core_tpg_add_node_to_devs():
 *
 *      Create demo-mode MappedLUNs for a node ACL from all active TPG LUNs.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, i.e. READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only non-TYPE_DISK devices (e.g. optical
                         * drives) to issue R/W in the default RO demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this
                 * dynamic LUN ACL now.
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}


/*      core_set_queue_depth_for_node():
 *
 *      Guard against a zero queue depth by defaulting it to 1.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                pr_err("Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}


void array_free(void *array, int n)
{
        void **a = array;
        int i;

        for (i = 0; i < n; i++)
                kfree(a[i]);
        kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
        void **a;
        int i;

        a = kzalloc(n * sizeof(void *), flags);
        if (!a)
                return NULL;
        for (i = 0; i < n; i++) {
                a[i] = kzalloc(size, flags);
                if (!a[i]) {
                        array_free(a, n);
                        return NULL;
                }
        }
        return a;
}
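
/*
 * The two helpers above manage a "ragged" array: one zeroed block of n
 * pointers, each pointing at its own zeroed element of the given size.
 * A minimal usage sketch (hypothetical "struct foo", not from this file):
 *
 *      struct foo **table;
 *
 *      table = array_zalloc(16, sizeof(struct foo), GFP_KERNEL);
 *      ...
 *      array_free(table, 16);
 *
 * array_free() frees all n elements and then the pointer array itself.
 */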

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize a node ACL's per-LUN device entry table.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_dev_entry), GFP_KERNEL);
        if (!nacl->device_list) {
                pr_err("Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -ENOMEM;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*      core_tpg_check_initiator_node_acl():
 *
 *      Return an existing node ACL, or create a dynamic one when the
 *      fabric permits demo mode.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        acl->dynamic_node_acl = 1;

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }
        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

        pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
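
/*
 * Usage sketch (illustrative only): a fabric module's login path would
 * typically call this before registering a session, so that demo-mode
 * initiators receive a dynamic ACL. "se_tpg" and "initiator_iqn" are
 * hypothetical local variables.
 *
 *      struct se_node_acl *nacl;
 *
 *      nacl = core_tpg_check_initiator_node_acl(se_tpg, initiator_iqn);
 *      if (!nacl)
 *              ... no ACL and demo mode disabled: reject the login ...
 */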

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                core_dev_del_lun(tpg, lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*      core_tpg_add_initiator_node_acl():
 *
 *      Create an explicit node ACL, taking over any existing dynamic ACL
 *      for the same initiator name.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_irq(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
                                                        se_nacl);
                        goto done;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        if (!se_nacl) {
                pr_err("struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the struct se_node_acl is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_irq(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_irq(&tpg->acl_node_lock);

done:
        pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
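
/*
 * Usage sketch (illustrative only): a fabric module's configfs NodeACL
 * creation callback might call this roughly as follows. "my_nacl",
 * "se_tpg", "name" and the queue depth of 32 are hypothetical.
 *
 *      struct se_node_acl *acl;
 *
 *      acl = core_tpg_add_initiator_node_acl(se_tpg, &my_nacl->se_node_acl,
 *                                            name, 32);
 *      if (IS_ERR(acl))
 *              return PTR_ERR(acl);
 */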

/*      core_tpg_del_initiator_node_acl():
 *
 *      Delete a node ACL, shutting down any sessions still using it.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        LIST_HEAD(sess_list);
        struct se_session *sess, *sess_tmp;
        unsigned long flags;
        int rc;

        spin_lock_irq(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        acl->acl_stop = 1;

        list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
                                sess_acl_list) {
                if (sess->sess_tearing_down != 0)
                        continue;

                target_get_session(sess);
                list_move(&sess->sess_acl_list, &sess_list);
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
                list_del(&sess->sess_acl_list);

                rc = tpg->se_tpg_tfo->shutdown_session(sess);
                /* Drop the reference taken while building sess_list above. */
                target_put_session(sess);
                if (!rc)
                        continue;
                /*
                 * shutdown_session() returned nonzero; drop the remaining
                 * session reference as well.
                 */
                target_put_session(sess);
        }
        target_put_nacl(acl);
        /*
         * Wait for the last target_put_nacl() to complete in
         * target_complete_nacl() for active fabric session
         * transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change a node ACL's queue depth, optionally forcing session
 *      reinstatement for any active session.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        unsigned long flags;
        int dynamic_acl = 0;

        spin_lock_irq(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                pr_err("Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock_irq(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_irq(&tpg->acl_node_lock);

        spin_lock_irqsave(&tpg->session_lock, flags);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        pr_err("Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
                        spin_unlock_irqrestore(&tpg->session_lock, flags);

                        spin_lock_irq(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_irq(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!tpg->se_tpg_tfo->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to set the requested queue depth.
         *
         * Finally call tpg->se_tpg_tfo->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_irqrestore(&tpg->session_lock, flags);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
                 */
                if (init_sess)
                        tpg->se_tpg_tfo->close_session(init_sess);

                spin_lock_irq(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_irq(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&tpg->session_lock, flags);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                tpg->se_tpg_tfo->close_session(init_sess);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        spin_lock_irq(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_irq(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
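
/*
 * Usage sketch (illustrative only): a configfs attribute store handler
 * could forward a user-requested depth like this. "se_tpg", "iqn" and
 * the depth/force values are hypothetical.
 *
 *      int ret;
 *
 *      ret = core_tpg_set_initiator_node_queue_depth(se_tpg, iqn, 64, 1);
 *      if (ret < 0)
 *              return ret;
 */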

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
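
/*
 * Example (illustrative only): per the comment above, the literal string
 * "NULL" clears a previously-set tag, while any string shorter than
 * MAX_ACL_TAG_SIZE replaces it. The tag value is hypothetical:
 *
 *      core_tpg_set_initiator_node_tag(tpg, acl, "backup-hosts");
 *      core_tpg_set_initiator_node_tag(tpg, acl, "NULL");
 */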

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);

        ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return ret;

        return 0;
}

int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
                        sizeof(struct se_lun), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                pr_err("Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_link_magic = SE_LUN_LINK_MAGIC;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_sep_lock);
                init_completion(&lun->lun_ref_comp);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        array_free(se_tpg->tpg_lun_list,
                                   TRANSPORT_MAX_LUNS_PER_TPG);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);
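
/*
 * Usage sketch (illustrative only): a fabric module's configfs make_tpg
 * callback would typically register its newly allocated TPG like this;
 * "my_fabric_ops" and "my_tpg" are hypothetical.
 *
 *      ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *                              my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *      if (ret < 0)
 *              return ERR_PTR(ret);
 *
 * The matching core_tpg_deregister() below is then called from the
 * fabric's drop_tpg callback.
 */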

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        struct se_node_acl *nacl, *nacl_tmp;

        pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        spin_lock_irq(&se_tpg->acl_node_lock);
        list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
                        acl_list) {
                list_del(&nacl->acl_list);
                se_tpg->num_node_acls--;
                spin_unlock_irq(&se_tpg->acl_node_lock);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

                spin_lock_irq(&se_tpg->acl_node_lock);
        }
        spin_unlock_irq(&se_tpg->acl_node_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);

        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                return ret;

        ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
                percpu_ref_exit(&lun->lun_ref);
                return ret;
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}
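
/*
 * Note on lun_ref lifetime: the percpu_ref_init() above is balanced by
 * percpu_ref_exit(), either in the error path here or in
 * core_tpg_remove_lun() below, where transport_clear_lun_ref() first
 * kills the ref and waits (via lun_ref_comp, completed by
 * core_tpg_lun_ref_release() above) for outstanding references to drain.
 */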

void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_ref(lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        percpu_ref_exit(&lun->lun_ref);
}