// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

/*      __core_tpg_get_initiator_node_acl():
 *
 *      The caller must hold tpg->acl_node_mutex.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Look up a node ACL by initiator name and take a reference on its
 *      acl_kref for use by a new se_session.  Returns NULL when no ACL
 *      exists or the ACL is already being released.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

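/*
 * Illustrative only — a minimal login-path sketch of how a fabric
 * driver might pair the acl_kref taken above with the release done by
 * transport_deregister_session()/transport_free_session().  The
 * surrounding fabric code is hypothetical, not part of this file:
 *
 *	acl = core_tpg_get_initiator_node_acl(tpg, name);
 *	if (!acl)
 *		return -ENOENT;
 *	sess->se_node_acl = acl;
 *	(the reference is dropped later via transport_free_session())
 */

/*
 * Queue a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every LUN
 * mapping of the node ACL, so the initiator is notified on its next
 * command to each LUN.
 */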
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*      core_tpg_add_node_to_devs():
 *
 *      Create demo-mode MappedLUNs for a node ACL, either for a single
 *      LUN (lun_orig != NULL) or for every LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in a LIO-Target fabric module,
		 * demo_mode_write_protect is ON, i.e. demo-mode LUN
		 * mappings default to READ-ONLY.
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

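/*
 * Apply a queue depth to a node ACL, falling back to a depth of 1 when
 * the requested value is zero.
 */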
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

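/*
 * Allocate and initialize a node ACL, sized to hold the fabric's
 * private data (node_acl_size) and seeded with the fabric's default
 * queue depth and node attributes.  The caller is responsible for
 * adding it to the TPG's ACL list.
 */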
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

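/*
 * Publish a freshly allocated ACL on the TPG's acl_node_list under
 * acl_node_mutex and log the addition.
 */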
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

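/*
 * Return true if an ACL with the given initiator name exists on the
 * TPG.  Unlike core_tpg_get_initiator_node_acl(), no reference is
 * taken.
 */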
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

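/*
 * Look up the node ACL for a login attempt, creating a dynamic
 * demo-mode ACL (with an extra acl_kref held for the session) when the
 * fabric allows demo mode and no explicit ACL exists.
 */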
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

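/*
 * Busy-wait until all persistent reservation references to the node
 * ACL have been dropped.
 */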
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

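/*
 * Create an explicit node ACL from configfs.  An existing dynamic ACL
 * for the same initiator is converted in place; an existing explicit
 * ACL is an error (-EEXIST).
 */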
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->fabric_name,
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->fabric_name,
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

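/*
 * Force-close every active session on the ACL.  The list scan restarts
 * from the top after each close because nacl_sess_lock is dropped
 * around the fabric's ->close_session() callback.
 */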
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (atomic_read(&sess->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

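/*
 * Tear down a node ACL: unlink it from the TPG, shut down its
 * sessions, wait for the final acl_kref put and any remaining PR
 * references, then free the per-LUN state and the ACL itself.
 */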
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for a node ACL and force session
 *      reinstatement by shutting down any active sessions.  Setting
 *      the current value again is a no-op.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * The user has requested to change the queue depth for an
	 * Initiator Node; apply the new value via
	 * target_set_nacl_queue_depth().
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*      core_tpg_set_initiator_node_tag():
 *
 *      Initiator nodeacl tags are not used internally, but may be used by
 *      userspace to emulate aliases or groups.
 *      Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

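/*
 * percpu_ref release callback for se_lun->lun_ref: signals
 * lun_shutdown_comp once the final I/O reference is dropped so LUN
 * shutdown can proceed.
 */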
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

/*
 * Register a TPG with target-core and, for TPGs with a valid proto_id
 * (>= 0), set up the virtual LUN 0.  Does not change se_wwn->priv.
 */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
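
/*
 * Illustrative only — a minimal ->fabric_make_tpg() sketch showing the
 * typical call into core_tpg_register().  The "my_tpg" container and
 * the choice of SCSI_PROTOCOL_ISCSI are hypothetical, not part of this
 * file:
 *
 *	struct my_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *
 *	if (!tpg)
 *		return ERR_PTR(-ENOMEM);
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 */

/*
 * Unregister a TPG: wait out persistent reservation references,
 * release any remaining demo-mode ACLs, and remove the virtual LUN 0
 * that core_tpg_register() created for TPGs with a valid proto_id.
 */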
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

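/*
 * Allocate and initialize an se_lun for the given TPG and unpacked
 * LUN.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */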
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

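/*
 * Export a device through a TPG LUN: initialize the percpu I/O
 * reference, allocate a relative target port identifier, attach the
 * device's default ALUA target port group, and publish the LUN on the
 * TPG's lun_hlist.
 */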
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

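/*
 * Reverse of core_tpg_add_lun(): quiesce and drain lun_ref, detach the
 * LUN from its device and ALUA target port group, and unpublish it
 * from the TPG.
 */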
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}