linux/drivers/target/target_core_device.c
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

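/*
 * Resolve @unpacked_lun to an se_lun for an incoming I/O command via the
 * session's NodeACL deve table under RCU, update the per-deve and per-device
 * statistics counters, and take a percpu se_lun->lun_ref that is held for
 * the life of the command.  Writes to READ_ONLY mappings fail with
 * TCM_WRITE_PROTECTED; when no MappedLUN=0 exists, the TPG's write-protected
 * virtual LUN 0 is used so that REPORT LUNS et al still succeed.
 */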
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_dev_entry *deve;
        sense_reason_t ret = TCM_NO_SENSE;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                atomic_long_inc(&deve->total_cmds);

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->write_bytes);
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->read_bytes);

                se_lun = rcu_dereference(deve->se_lun);
                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        rcu_read_unlock();
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
        rcu_read_unlock();

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
                }

                se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;

                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE)) {
                        ret = TCM_WRITE_PROTECTED;
                        goto ref_dev;
                }
        }
        /*
         * This RCU reference is protected by the percpu se_lun->lun_ref taken
         * above, which must drop to zero (including the initial reference)
         * before the se_lun pointer can be freed via kfree_rcu() by the final
         * se_lun->lun_group put in
         * target_core_fabric_configfs.c:target_fabric_port_release().
         */
ref_dev:
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        atomic_long_inc(&se_cmd->se_dev->num_cmds);

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->read_bytes);

        return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

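/*
 * Resolve @unpacked_lun for a task management request.  Uses the same RCU
 * based deve lookup as transport_lookup_cmd_lun(), but without statistics
 * updates or a percpu lun_ref (see the XXX below); on success the TMR is
 * added to the device's dev_tmr_list.
 */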
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, unpacked_lun);
        if (deve) {
                se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
                se_cmd->se_lun = rcu_dereference(deve->se_lun);
                se_lun = rcu_dereference(deve->se_lun);
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
        }
        rcu_read_unlock();

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08llx\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                return -ENODEV;
        }
        /*
         * XXX: Add percpu se_lun->lun_ref reference count for TMR
         */
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

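/*
 * Return true when the deve backing cmd->orig_fe_lun is mapped READ_ONLY
 * for this session's NodeACL.
 */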
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
        struct se_session *se_sess = cmd->se_sess;
        struct se_dev_entry *deve;
        bool ret;

        rcu_read_lock();
        deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
        ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_portal_group *tpg = nacl->se_tpg;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                lun = rcu_dereference(deve->se_lun);
                if (!lun) {
                        pr_err("%s device entry's se_lun pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (lun->lun_rtpi != rtpi)
                        continue;

                kref_get(&deve->pr_kref);
                rcu_read_unlock();

                return deve;
        }
        rcu_read_unlock();

        return NULL;
}

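/*
 * Drop every remaining MappedLUN entry for @nacl, typically when the
 * NodeACL itself is being released.
 */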
void core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                struct se_lun *lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        }
        mutex_unlock(&nacl->lun_entry_mutex);
}

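/*
 * Switch an existing MappedLUN entry between READ_WRITE and READ_ONLY
 * under nacl->lun_entry_mutex.
 */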
void core_update_device_list_access(
        u64 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, mapped_lun);
        if (deve) {
                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }
        }
        mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
        struct se_dev_entry *deve;

        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                if (deve->mapped_lun == mapped_lun)
                        return deve;

        return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
        struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
                                                 pr_kref);
        complete(&deve->pr_comp);
}

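/*
 * Queue a REPORTED LUNS DATA HAS CHANGED unit attention (ASC 0x3f) on each
 * deve for this NodeACL, optionally skipping the entry that triggered it.
 */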
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
                             bool skip_new)
{
        struct se_dev_entry *tmp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
                if (skip_new && tmp == new)
                        continue;
                core_scsi3_ua_allocate(tmp, 0x3F,
                                       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
        }
        rcu_read_unlock();
}

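/*
 * Install a new se_dev_entry for a MappedLUN.  If a dynamic (demo-mode)
 * entry already exists for the same mapped_lun, it is replaced via RCU for
 * the dynamic -> explicit NodeACL conversion case.
 */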
int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u64 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *orig, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new) {
                pr_err("Unable to allocate se_dev_entry memory\n");
                return -ENOMEM;
        }

        atomic_set(&new->ua_count, 0);
        spin_lock_init(&new->ua_lock);
        INIT_LIST_HEAD(&new->ua_list);
        INIT_LIST_HEAD(&new->lun_link);

        new->mapped_lun = mapped_lun;
        kref_init(&new->pr_kref);
        init_completion(&new->pr_comp);

        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        else
                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

        new->creation_time = get_jiffies_64();
        new->attach_count++;

        mutex_lock(&nacl->lun_entry_mutex);
        orig = target_nacl_find_deve(nacl, mapped_lun);
        if (orig && orig->se_lun) {
                struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                if (orig_lun != lun) {
                        pr_err("Existing orig->se_lun doesn't match new lun"
                               " for dynamic -> explicit NodeACL conversion:"
                                " %s\n", nacl->initiatorname);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }
                BUG_ON(orig->se_lun_acl != NULL);

                rcu_assign_pointer(new->se_lun, lun);
                rcu_assign_pointer(new->se_lun_acl, lun_acl);
                hlist_del_rcu(&orig->link);
                hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
                mutex_unlock(&nacl->lun_entry_mutex);

                spin_lock(&lun->lun_deve_lock);
                list_del(&orig->lun_link);
                list_add_tail(&new->lun_link, &lun->lun_deve_list);
                spin_unlock(&lun->lun_deve_lock);

                kref_put(&orig->pr_kref, target_pr_kref_release);
                wait_for_completion(&orig->pr_comp);

                target_luns_data_has_changed(nacl, new, true);
                kfree_rcu(orig, rcu_head);
                return 0;
        }

        rcu_assign_pointer(new->se_lun, lun);
        rcu_assign_pointer(new->se_lun_acl, lun_acl);
        hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
        mutex_unlock(&nacl->lun_entry_mutex);

        spin_lock(&lun->lun_deve_lock);
        list_add_tail(&new->lun_link, &lun->lun_deve_list);
        spin_unlock(&lun->lun_deve_lock);

        target_luns_data_has_changed(nacl, new, true);
        return 0;
}

/*
 *      Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_dev_entry *orig,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * lun->lun_deve_list must be removed now, before clearing the
         * struct se_dev_entry pointers below, as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->lun_link from
         * lun->lun_deve_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below.
         */
        spin_lock(&lun->lun_deve_lock);
        list_del(&orig->lun_link);
        spin_unlock(&lun->lun_deve_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(orig);

        hlist_del_rcu(&orig->link);
        clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
        orig->lun_flags = 0;
        orig->creation_time = 0;
        orig->attach_count--;
        /*
         * Before firing off the RCU callback, wait for any in-flight
         * SPEC_I_PT=1 or REGISTER_AND_MOVE PR operation to complete.
         */
        kref_put(&orig->pr_kref, target_pr_kref_release);
        wait_for_completion(&orig->pr_comp);

        rcu_assign_pointer(orig->se_lun, NULL);
        rcu_assign_pointer(orig->se_lun_acl, NULL);

        kfree_rcu(orig, rcu_head);

        core_scsi3_free_pr_reg_from_nacl(dev, nacl);
        target_luns_data_has_changed(nacl, NULL, false);
}

/*
 * core_clear_lun_from_tpg - drop @lun from every NodeACL's MappedLUN
 * list within @tpg when the LUN itself is being removed.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

                mutex_lock(&nacl->lun_entry_mutex);
                hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                        struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
                                        lockdep_is_held(&nacl->lun_entry_mutex));

                        if (lun != tmp_lun)
                                continue;

                        core_disable_device_list_for_node(lun, deve, nacl, tpg);
                }
                mutex_unlock(&nacl->lun_entry_mutex);
        }
        mutex_unlock(&tpg->acl_node_mutex);
}

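/*
 * Assign the next free RELATIVE TARGET PORT IDENTIFIER to @lun, skipping
 * the reserved value 0h and any identifier already in use on @dev (see the
 * spc4r17 table below).
 */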
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
        struct se_lun *tmp;

        spin_lock(&dev->se_port_lock);
        if (dev->export_count == 0x0000ffff) {
                pr_warn("Reached dev->export_count =="
                                " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return -ENOSPC;
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code      Description
         * 0h        Reserved
         * 1h        Relative port 1, historically known as port A
         * 2h        Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        lun->lun_rtpi = dev->dev_rpti_counter++;
        if (!lun->lun_rtpi)
                goto again;

        list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap.
                 */
                if (lun->lun_rtpi == tmp->lun_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}

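/*
 * Export @dev as @lun within @tpg, and update the LUN maps of any
 * dynamically generated NodeACLs when demo mode is enabled.
 */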
int core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
        struct se_lun *lun)
{
        int rc;

        rc = core_tpg_add_lun(tpg, lun,
                                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
        if (rc < 0)
                return rc;

        pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;

                mutex_lock(&tpg->acl_node_mutex);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                core_tpg_add_node_to_devs(acl, tpg, lun);
                        }
                }
                mutex_unlock(&tpg->acl_node_mutex);
        }

        return 0;
}

/*
 * core_dev_del_lun - deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());

        core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
        u64 mapped_lun,
        int *ret)
{
        struct se_lun_acl *lacl;

        if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;

        return lacl;
}

int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        struct se_lun *lun,
        u32 lun_access)
{
        struct se_node_acl *nacl = lacl->se_lun_nacl;
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        if (!nacl)
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg) < 0)
                return -EINVAL;

        pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                nacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL.
         */
        core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
}

int core_dev_del_initiator_node_lun_acl(
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_portal_group *tpg = lun->lun_tpg;
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
        if (deve)
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        mutex_unlock(&nacl->lun_entry_mutex);

        pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %llu\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                nacl->initiatorname, lacl->mapped_lun);

        return 0;
}

void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
                        buf[i] = wwn->vendor[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Vendor: %s\n", buf);

        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
                        buf[i] = wwn->model[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Model: %s\n", buf);

        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
                        buf[i] = wwn->revision[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Revision: %s\n", buf);

        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

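/*
 * Ask the backend to allocate an se_device, then initialize all generic
 * state: lists, locks, T10 WWN/PR/ALUA containers, default attributes, and
 * the embedded LUN used for local EXTENDED COPY I/O.
 */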
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;
        struct se_lun *xcopy_lun;

        dev = hba->backend->ops->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->dev_link_magic = SE_DEV_LINK_MAGIC;
        dev->se_hba = hba;
        dev->transport = hba->backend->ops;
        dev->prot_length = sizeof(struct t10_pi_tuple);
        dev->hba_index = hba->hba_index;

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        INIT_LIST_HEAD(&dev->g_dev_node);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        sema_init(&dev->caw_sem, 1);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
        INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
        spin_lock_init(&dev->t10_alua.lba_map_lock);

        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = 1;
        dev->dev_attrib.emulate_fua_write = 1;
        dev->dev_attrib.emulate_fua_read = 1;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
        dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_ref_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
        xcopy_lun->lun_tpg = &xcopy_pt_tpg;

        return dev;
}

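/*
 * Bring an allocated se_device online: let the backend configure itself,
 * clamp hw_max_sectors to a PAGE_SIZE aligned value, set up ALUA, create
 * the TMR workqueue, preload INQUIRY defaults for virtual backends, and
 * add the device to the global device list.
 */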
int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret;

        if (dev->dev_flags & DF_CONFIGURED) {
                pr_err("se_dev->se_dev_ptr already set for storage"
                                " object\n");
                return -EEXIST;
        }

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out;
        /*
         * XXX: there is not much point to have two different values here..
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
        dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out;

        /*
         * Startup the struct se_device processing thread
         */
        dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                                      dev->transport->name);
        if (!dev->tmr_wq) {
                pr_err("Unable to create tmr workqueue for %s\n",
                        dev->transport->name);
                ret = -ENOMEM;
                goto out_free_alua;
        }

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
                strncpy(&dev->t10_wwn.revision[0],
                        dev->transport->inquiry_rev, 4);
        }

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);

        mutex_lock(&g_device_mutex);
        list_add_tail(&dev->g_dev_node, &g_device_list);
        mutex_unlock(&g_device_mutex);

        dev->dev_flags |= DF_CONFIGURED;

        return 0;

out_free_alua:
        core_alua_free_lu_gp_mem(dev);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}

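/*
 * Tear down everything target_configure_device() and target_alloc_device()
 * set up, then hand the device back to the backend via ->free_device().
 */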
void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (dev->dev_flags & DF_CONFIGURED) {
                destroy_workqueue(dev->tmr_wq);

                mutex_lock(&g_device_mutex);
                list_del(&dev->g_dev_node);
                mutex_unlock(&g_device_mutex);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_alua_set_lba_map(dev, NULL, 0, 0);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        if (dev->transport->free_prot)
                dev->transport->free_prot(dev);

        dev->transport->free_device(dev);
}

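/*
 * Create the global rd_mcp backed nullio ramdisk device used as virtual
 * LUN 0 during subsystem init.
 */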
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[] = "rd_pages=8,rd_nullio=1";
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}


void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
        unsigned char *cdb = cmd->t_task_cdb;

        /*
         * Clear a LUN set in the CDB if the initiator talking to us spoke
         * an old standards version, as we can't assume the underlying
         * device won't choke on it.
         */
        switch (cdb[0]) {
        case READ_10: /* SBC - RDProtect */
        case READ_12: /* SBC - RDProtect */
        case READ_16: /* SBC - RDProtect */
        case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
        case VERIFY: /* SBC - VRProtect */
        case VERIFY_16: /* SBC - VRProtect */
        case WRITE_VERIFY: /* SBC - VRProtect */
        case WRITE_VERIFY_12: /* SBC - VRProtect */
        case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
                break;
        default:
                cdb[1] &= 0x1f; /* clear logical unit number */
                break;
        }

        /*
         * For REPORT LUNS we always need to emulate the response; everything
         * else is passed up.
         */
        if (cdb[0] == REPORT_LUNS) {
                cmd->execute_cmd = spc_emulate_report_luns;
                return TCM_NO_SENSE;
        }

        /* Set DATA_CDB flag for ops that should have it */
        switch (cdb[0]) {
        case READ_6:
        case READ_10:
        case READ_12:
        case READ_16:
        case WRITE_6:
        case WRITE_10:
        case WRITE_12:
        case WRITE_16:
        case WRITE_VERIFY:
        case WRITE_VERIFY_12:
        case 0x8e: /* WRITE_VERIFY_16 */
        case COMPARE_AND_WRITE:
        case XDWRITEREAD_10:
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                break;
        case VARIABLE_LENGTH_CMD:
                switch (get_unaligned_be16(&cdb[8])) {
                case READ_32:
                case WRITE_32:
                case 0x0c: /* WRITE_VERIFY_32 */
                case XDWRITEREAD_32:
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                        break;
                }
        }

        cmd->execute_cmd = exec_cmd;

        return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
