linux/drivers/target/target_core_alua.c
/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit access (ALUA)
 * emulation.
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
  25
  26#include <linux/slab.h>
  27#include <linux/spinlock.h>
  28#include <linux/configfs.h>
  29#include <linux/export.h>
  30#include <linux/file.h>
  31#include <scsi/scsi_proto.h>
  32#include <asm/unaligned.h>
  33
  34#include <target/target_core_base.h>
  35#include <target/target_core_backend.h>
  36#include <target/target_core_fabric.h>
  37
  38#include "target_core_internal.h"
  39#include "target_core_alua.h"
  40#include "target_core_ua.h"
  41
  42static sense_reason_t core_alua_check_transition(int state, int valid,
  43                                                 int *primary);
  44static int core_alua_set_tg_pt_secondary_state(
  45                struct se_lun *lun, int explicit, int offline);
  46
  47static char *core_alua_dump_state(int state);
  48
  49static void __target_attach_tg_pt_gp(struct se_lun *lun,
  50                struct t10_alua_tg_pt_gp *tg_pt_gp);
  51
  52static u16 alua_lu_gps_counter;
  53static u32 alua_lu_gps_count;
  54
  55static DEFINE_SPINLOCK(lu_gps_lock);
  56static LIST_HEAD(lu_gps_list);
  57
  58struct t10_alua_lu_gp *default_lu_gp;
  59
  60/*
  61 * REPORT REFERRALS
  62 *
  63 * See sbc3r35 section 5.23
  64 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
        spin_lock(&dev->t10_alua.lba_map_lock);
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH in the header of the Data-In payload.
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
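
/*
 * A sketch of one user data segment referral descriptor as built above,
 * reconstructed from the buffer writes (see sbc3r35 for the authoritative
 * format); offsets are relative to the start of the descriptor:
 *
 *   byte 3        number of target port group descriptors (pg_num)
 *   bytes 4..11   FIRST USER DATA SEGMENT LBA (big-endian)
 *   bytes 12..19  LAST USER DATA SEGMENT LBA (big-endian)
 *   bytes 20+     one 4-byte entry per port group:
 *                   byte 0      ASYMMETRIC ACCESS STATE (low nibble)
 *                   bytes 2..3  TARGET PORT GROUP identifier
 *
 * which matches the 20 + 4 * pg_num bytes accounted into rd_len per map.
 */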

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct se_lun *lun;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over the RESERVED area to the first target port group
         * descriptor, depending on the PARAMETER DATA FORMAT type.
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
                 * based on tg_pt_gp_members count will fit into the response payload.
                 * Otherwise, bump rd_len to let the initiator know we have exceeded
                 * the allocation length and the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
                buf[off++] |= (atomic_read(
                        &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
                buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
                        buf[off++] = (lun->lun_rtpi & 0xff);
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH in the header of the Data-In payload.
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the application
                 * client to use as a base for its transition timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
                 * this CDB was received upon to determine this value individually
                 * for the ALUA target port group.
                 */
                spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
                tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
                if (tg_pt_gp)
                        buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
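
/*
 * Illustrative only: the ext_hdr check above keys off bit 5 of CDB byte 1,
 * i.e. the PARAMETER DATA FORMAT field of the MAINTENANCE IN CDB. Assuming
 * the SPC-4 encoding (opcode 0xa3, service action 0x0a), an initiator
 * requesting the extended header format would send something like:
 *
 *   a3 2a 00 00 00 00 00 00 04 00 00 00
 *
 * where byte 1 = 0x2a sets PARAMETER DATA FORMAT = 001b on top of the
 * REPORT TARGET PORT GROUPS service action, and bytes 6..9 carry the
 * allocation length (here 0x400).
 */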

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_lun *l_lun = cmd->se_lun;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        spin_lock(&l_lun->lun_tg_pt_gp_lock);
        l_tg_pt_gp = l_lun->lun_tg_pt_gp;
        if (!l_tg_pt_gp) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_err("Unable to access l_lun->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        spin_unlock(&l_lun->lun_tg_pt_gp_lock);

        ptr = &buf[4]; /* Skip over RESERVED area in header */

        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state,
                                                valid_states, &primary);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_lun, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        struct se_lun *lun;

                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(lun, &dev->dev_sep_list,
                                                        lun_dev_link) {
                                if (lun->lun_rtpi != rtpi)
                                        continue;

                                // XXX: racy unlock
                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                lun, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, GOOD);
        return rc;
}
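
/*
 * For reference, the parameter list walked above is a 4-byte reserved header
 * followed by 4-byte set target port group descriptors, each parsed as:
 *
 *   byte 0, bits 3:0   ASYMMETRIC ACCESS STATE (ptr[0] & 0x0f)
 *   bytes 2..3         TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER,
 *                      big-endian, depending on whether the requested state
 *                      is primary or secondary
 *
 * e.g. a payload of 00 00 00 00 02 00 00 01 would request the Standby (0x2)
 * primary state for target port group 0x0001 (illustrative values only).
 */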

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
        /*
         * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible';
         * the ALUA additional sense code qualifier (ASCQ) is determined
         * by the ALUA primary or secondary access state.
         */
        pr_debug("[%s]: ALUA TG Port not available, "
                "SenseKey: NOT_READY, ASC/ASCQ: "
                "0x04/0x%02x\n",
                cmd->se_tfo->get_fabric_name(), alua_ascq);

        cmd->scsi_asc = 0x04;
        cmd->scsi_ascq = alua_ascq;
}

static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
        int nonop_delay_msecs)
{
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
         * later to determine if processing of this cmd needs to be
         * temporarily delayed for the Active/NonOptimized primary access state.
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id !=
                            tg_pt_gp->tg_pt_gp_id)
                                continue;
                        switch(map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                                return 1;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}
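
/*
 * Worked example for the segment_mult arithmetic above (hypothetical
 * numbers): with segment_size = 0x1000 and segment_mult = 2, do_div()
 * reduces an I/O LBA to its offset within the repeating 0x2000-block
 * stripe, so lba = 0x3400 yields start_lba = 0x3400 % 0x2000 = 0x1400.
 * That offset falls inside the second segment's window (first_lba 0x1000,
 * last_lba 0x1fff), so that map entry is selected and the walk advances
 * by one segment_size.
 */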

static inline int core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
         * spc4r17 section 5.9.2.4.4
         */
        switch (cdb[0]) {
        case INQUIRY:
        case LOG_SELECT:
        case LOG_SENSE:
        case MODE_SELECT:
        case MODE_SENSE:
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
        case READ_CAPACITY:
                return 0;
        case SERVICE_ACTION_IN_16:
                switch (cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
         * spc4r17 section 5.9.2.4.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
         * spc4r17 section 5.9.2.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                return 1;
        }

        return 0;
}

/*
 * Returns 0 (TCM_NO_SENSE) when the command may be processed,
 * TCM_CHECK_CONDITION_NOT_READY when the LUN is not accessible in the
 * current ALUA access state, or TCM_INVALID_CDB_FIELD for an unknown
 * primary access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;

        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;

        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
                return TCM_CHECK_CONDITION_NOT_READY;
        }

        if (!lun->lun_tg_pt_gp)
                return 0;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

        // XXX: keeps using tg_pt_gp without reference after unlock
        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP.
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (core_alua_state_standby(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (core_alua_state_unavailable(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (core_alua_state_transition(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                return TCM_INVALID_CDB_FIELD;
        }

        return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary)
{
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
         * defined as primary target port asymmetric access states.
         */
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                if (!(valid & ALUA_AO_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                if (!(valid & ALUA_AN_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (!(valid & ALUA_S_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (!(valid & ALUA_U_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (!(valid & ALUA_LBD_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_OFFLINE:
                /*
                 * OFFLINE state is defined as a secondary target port
                 * asymmetric access state.
                 */
                if (!(valid & ALUA_O_SUP))
                        goto not_supported;
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                /*
                 * Transitioning is set internally, and
                 * cannot be selected manually.
                 */
                goto not_supported;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
        }

        return 0;

not_supported:
        pr_err("ALUA access state %s not supported\n",
               core_alua_dump_state(state));
        return TCM_INVALID_PARAMETER_LIST;
}
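
/*
 * Note on the "valid" bitmask checked above: tg_pt_gp_alua_supported_states
 * mirrors the supported-states byte reported by REPORT TARGET PORT GROUPS,
 * with ALUA_AO_SUP / ALUA_AN_SUP / ALUA_S_SUP / ALUA_U_SUP / ALUA_LBD_SUP /
 * ALUA_O_SUP as individual bits. A group configured to support only
 * Active/Optimized, Active/NonOptimized and Standby would, for example,
 * carry (ALUA_AO_SUP | ALUA_AN_SUP | ALUA_S_SUP) and reject a request for
 * Unavailable here with TCM_INVALID_PARAMETER_LIST.
 */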

static char *core_alua_dump_state(int state)
{
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                return "Active/Optimized";
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                return "Active/NonOptimized";
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                return "LBA Dependent";
        case ALUA_ACCESS_STATE_STANDBY:
                return "Standby";
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                return "Unavailable";
        case ALUA_ACCESS_STATE_OFFLINE:
                return "Offline";
        case ALUA_ACCESS_STATE_TRANSITION:
                return "Transitioning";
        default:
                return "Unknown";
        }

        return NULL;
}

char *core_alua_dump_status(int status)
{
        switch (status) {
        case ALUA_STATUS_NONE:
                return "None";
        case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
                return "Altered by Explicit STPG";
        case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
                return "Altered by Implicit ALUA";
        default:
                return "Unknown";
        }

        return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
        struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
                return 0;
        if (in_interrupt())
                return 0;
        /*
         * The ALUA Active/NonOptimized access state delay can be disabled
         * via configfs with a value of zero
         */
        if (!cmd->alua_nonop_delay)
                return 0;
        /*
         * struct se_cmd->alua_nonop_delay gets set by a target port group
         * defined interval in core_alua_state_nonoptimized()
         */
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
        u32 md_buf_len)
{
        struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        int ret;

        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
        ret = kernel_write(file, md_buf, md_buf_len, 0);
        if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
        fput(file);
        return (ret < 0) ? -EIO : 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char path[ALUA_METADATA_PATH_LEN];
        int len, rc;

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);

        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
                        tg_pt_gp->tg_pt_gp_alua_pending_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);

        snprintf(path, ALUA_METADATA_PATH_LEN,
                "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
                config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);
        return rc;
}
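
/*
 * Sketch of the resulting on-disk state, with a made-up unit serial and
 * group name: a file such as
 *
 *   /var/target/alua/tpgs_<unit_serial>/some_tg_pt_gp
 *
 * would contain:
 *
 *   tg_pt_gp_id=1
 *   alua_access_state=0x00
 *   alua_access_status=0x01
 *
 * allowing the saved primary state and status to be re-applied from
 * userspace later.
 */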

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_dev_entry *se_deve;
        struct se_lun *lun;
        struct se_lun_acl *lacl;

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition for the initiator port associated with every I_T
                 * nexus with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED.
                 *
                 * After an explicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED for the initiator port associated with
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command was received.
                 */
                if (!percpu_ref_tryget_live(&lun->lun_ref))
                        continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock(&lun->lun_deve_lock);
                list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
                        lacl = rcu_dereference_check(se_deve->se_lun_acl,
                                        lockdep_is_held(&lun->lun_deve_lock));

                        /*
                         * spc4r37 p.242:
                         * After an explicit target port asymmetric access
                         * state change, a device server shall establish a
                         * unit attention condition with the additional sense
                         * code set to ASYMMETRIC ACCESS STATE CHANGED for
                         * the initiator port associated with every I_T nexus
                         * other than the I_T nexus on which the SET TARGET
                         * PORT GROUPS command was received.
                         */
                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
                           (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                                continue;
                        /*
                         * se_deve->se_lun_acl pointer may be NULL for an
                         * entry created without explicit Node+MappedLUN ACLs
                         */
                        if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
                                continue;

                        core_scsi3_ua_allocate(se_deve, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
                spin_unlock(&lun->lun_deve_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
                struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
         * to struct file.
         *
         * Note that there is the case where we do not want to update the
         * metadata when the saved metadata is being parsed in userspace
         * when setting the existing port access state and access status.
         *
         * Also note that the failure to write out the ALUA metadata to
         * struct file does NOT affect the actual ALUA transition.
         */
        if (tg_pt_gp->tg_pt_gp_write_metadata) {
                mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
                core_alua_update_tpg_primary_metadata(tg_pt_gp);
                mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
        }
        /*
         * Set the current primary ALUA access state to the requested new state
         */
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                   tg_pt_gp->tg_pt_gp_alua_pending_state);

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " from primary access state %s to %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));

        core_alua_queue_state_change_ua(tg_pt_gp);

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (tg_pt_gp->tg_pt_gp_transition_complete)
                complete(tg_pt_gp->tg_pt_gp_transition_complete);
}

static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
{
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        DECLARE_COMPLETION_ONSTACK(wait);

        /* Nothing to be done here */
        if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
                return 0;

        if (new_state == ALUA_ACCESS_STATE_TRANSITION)
                return -EAGAIN;

        /*
         * Flush any pending transitions
         */
        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
            atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
            ALUA_ACCESS_STATE_TRANSITION) {
                /* Just in case */
                tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
                return 0;
        }

        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
        tg_pt_gp->tg_pt_gp_alua_previous_state =
                atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                        ALUA_ACCESS_STATE_TRANSITION);
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        core_alua_queue_state_change_ua(tg_pt_gp);

        /*
         * Check for the optional ALUA primary state transition delay
         */
        if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

        /*
         * Take a reference for workqueue item
         */
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
                unsigned long transition_tmo;

                transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work,
                                   transition_tmo);
        } else {
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work, 0);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
        }

        return 0;
}
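
/*
 * Summary of the transition flow implemented above (descriptive only):
 * the group is first moved into ALUA_ACCESS_STATE_TRANSITION and a unit
 * attention is queued; the final state change is then applied by
 * core_alua_do_transition_tg_pt_work(). For implicit transitions with a
 * non-zero tg_pt_gp_implicit_trans_secs the work runs delayed, leaving the
 * group visibly in Transitioning for that window, while explicit (STPG)
 * transitions queue the work immediately and wait for its completion.
 */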

int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
        struct se_lun *l_lun,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
{
        struct se_device *dev;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;

        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
                return -EINVAL;

        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
         * we only do transition on the passed *l_tg_pt_gp, and not
         * on all of the matching target port group IDs in default_lu_gp.
         */
        if (!lu_gp->lu_gp_id) {
                /*
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
                l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
                return rc;
        }
        /*
         * For all other LU groups aside from 'default_lu_gp', walk all of
         * the associated storage objects looking for a matching target port
         * group ID from the local target port group.
         */
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
                                lu_gp_mem_list) {

                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
                spin_unlock(&lu_gp->lu_gp_lock);

                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
                                &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {

                        if (!tg_pt_gp->tg_pt_gp_valid_id)
                                continue;
                        /*
                         * If the target port asymmetric access state is
                         * changed for any target port group accessible via
                         * a logical unit within a LU group, the target port
                         * group asymmetric access states for the same
                         * target port group accessible via other logical
                         * units in that LU group will also change.
                         */
                        if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
                                continue;

                        if (l_tg_pt_gp == tg_pt_gp) {
                                tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
                                tg_pt_gp->tg_pt_gp_alua_lun = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
                         */
                        rc = core_alua_do_transition_tg_pt(tg_pt_gp,
                                        new_state, explicit);

                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        if (rc)
                                break;
                }
                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
        }
        spin_unlock(&lu_gp->lu_gp_lock);

        if (!rc) {
                pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
                         " Group IDs: %hu %s transition to primary state: %s\n",
                         config_item_name(&lu_gp->lu_gp_group.cg_item),
                         l_tg_pt_gp->tg_pt_gp_id,
                         (explicit) ? "explicit" : "implicit",
                         core_alua_dump_state(new_state));
        }

        atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
        return rc;
}

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
        char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
        int len, rc;

        mutex_lock(&lun->lun_tg_pt_md_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                rc = -ENOMEM;
                goto out_unlock;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);
        memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

        len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
                        se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

        if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
                snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
                                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);

        snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
                        se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
                        lun->unpacked_lun);

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);

out_unlock:
        mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
}
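
/*
 * Illustrative path layout only (fabric name, WWN and TPG tag are made up):
 * for an iSCSI TPG this would write a file such as
 *
 *   /var/target/alua/iscsi/iqn.2003-01.org.example:target0+1/lun_0
 *
 * containing:
 *
 *   alua_tg_pt_offline=1
 *   alua_tg_pt_status=0x01
 *
 * i.e. the secondary OFFLINE flag and ALUA status for that fabric port.
 */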

static int core_alua_set_tg_pt_secondary_state(
        struct se_lun *lun,
        int explicit,
        int offline)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int trans_delay_msecs;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        if (!tg_pt_gp) {
                spin_unlock(&lun->lun_tg_pt_gp_lock);
                pr_err("Unable to complete secondary state"
                                " transition\n");
                return -EINVAL;
        }
        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
        /*
         * Set the secondary ALUA target port access state to OFFLINE
         * or release the previously secondary state for struct se_lun
         */
        if (offline)
                atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
        else
                atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

        lun->lun_tg_pt_secondary_stat = (explicit) ?
                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " to secondary access state: %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Do the optional transition delay after we set the secondary
         * ALUA access state.
         */
        if (trans_delay_msecs != 0)
                msleep_interruptible(trans_delay_msecs);
        /*
         * See if we need to update the ALUA fabric port metadata for
         * secondary state and status
         */
        if (lun->lun_tg_pt_secondary_write_md)
                core_alua_update_tpg_secondary_metadata(lun);

        return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
                           u64 first_lba, u64 last_lba)
{
        struct t10_alua_lba_map *lba_map;

        lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
        if (!lba_map) {
                pr_err("Unable to allocate struct t10_alua_lba_map\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
        lba_map->lba_map_first_lba = first_lba;
        lba_map->lba_map_last_lba = last_lba;

        list_add_tail(&lba_map->lba_map_list, list);
        return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
                               int pg_id, int state)
{
        struct t10_alua_lba_map_member *lba_map_mem;

        list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
                            lba_map_mem_list) {
                if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
                        pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
                        return -EINVAL;
                }
        }

        lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
        if (!lba_map_mem) {
                pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
                return -ENOMEM;
        }
        lba_map_mem->lba_map_mem_alua_state = state;
        lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

        list_add_tail(&lba_map_mem->lba_map_mem_list,
                      &lba_map->lba_map_mem_list);
        return 0;
}

void
core_alua_free_lba_map(struct list_head *lba_list)
{
        struct t10_alua_lba_map *lba_map, *lba_map_tmp;
        struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

        list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
                                 lba_map_list) {
                list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
                                         &lba_map->lba_map_mem_list,
                                         lba_map_mem_list) {
                        list_del(&lba_map_mem->lba_map_mem_list);
                        kmem_cache_free(t10_alua_lba_map_mem_cache,
                                        lba_map_mem);
                }
                list_del(&lba_map->lba_map_list);
                kmem_cache_free(t10_alua_lba_map_cache, lba_map);
        }
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
                      int segment_size, int segment_mult)
{
        struct list_head old_lba_map_list;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int activate = 0, supported;

        INIT_LIST_HEAD(&old_lba_map_list);
        spin_lock(&dev->t10_alua.lba_map_lock);
        dev->t10_alua.lba_map_segment_size = segment_size;
        dev->t10_alua.lba_map_segment_multiplier = segment_mult;
        list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
        if (lba_map_list) {
                list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
                activate = 1;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                            tg_pt_gp_list) {

                if (!tg_pt_gp->tg_pt_gp_valid_id)
                        continue;
                supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1433                if (activate)
1434                        supported |= ALUA_LBD_SUP;
1435                else
1436                        supported &= ~ALUA_LBD_SUP;
1437                tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1438        }
1439        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1440        core_alua_free_lba_map(&old_lba_map_list);
1441}
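
/*
 * Minimal sketch (not taken from this file) of how the three helpers
 * above cooperate to install a referrals LBA map; the LBA range, the
 * pg_ids, and the segment size/multiplier are arbitrary example values,
 * and error handling is elided:
 *
 *	LIST_HEAD(lba_list);
 *	struct t10_alua_lba_map *map;
 *
 *	map = core_alua_allocate_lba_map(&lba_list, 0, 0xffffffff);
 *	if (!IS_ERR(map)) {
 *		core_alua_allocate_lba_map_mem(map, 1,
 *				ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *		core_alua_allocate_lba_map_mem(map, 2,
 *				ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED);
 *		core_alua_set_lba_map(dev, &lba_list, 4096, 1);
 *	}
 *
 * Passing a NULL lba_map_list to core_alua_set_lba_map() instead clears
 * the active map and drops ALUA_LBD_SUP from every valid group.
 */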
1442
1443struct t10_alua_lu_gp *
1444core_alua_allocate_lu_gp(const char *name, int def_group)
1445{
1446        struct t10_alua_lu_gp *lu_gp;
1447
1448        lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1449        if (!lu_gp) {
1450                pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1451                return ERR_PTR(-ENOMEM);
1452        }
1453        INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1454        INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1455        spin_lock_init(&lu_gp->lu_gp_lock);
1456        atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1457
1458        if (def_group) {
1459                lu_gp->lu_gp_id = alua_lu_gps_counter++;
1460                lu_gp->lu_gp_valid_id = 1;
1461                alua_lu_gps_count++;
1462        }
1463
1464        return lu_gp;
1465}
1466
1467int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1468{
1469        struct t10_alua_lu_gp *lu_gp_tmp;
1470        u16 lu_gp_id_tmp;
1471        /*
1472         * The lu_gp->lu_gp_id may only be set once.
1473         */
1474        if (lu_gp->lu_gp_valid_id) {
1475                pr_warn("ALUA LU Group already has a valid ID,"
1476                        " ignoring request\n");
1477                return -EINVAL;
1478        }
1479
1480        spin_lock(&lu_gps_lock);
1481        if (alua_lu_gps_count == 0x0000ffff) {
1482                pr_err("Maximum ALUA alua_lu_gps_count:"
1483                                " 0x0000ffff reached\n");
1484                spin_unlock(&lu_gps_lock);
1485                /* lu_gp is freed later via its configfs release path */
1486                return -ENOSPC;
1487        }
1488again:
1489        lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1490                                alua_lu_gps_counter++;
1491
1492        list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1493                if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1494                        if (!lu_gp_id)
1495                                goto again;
1496
1497                        pr_warn("ALUA Logical Unit Group ID: %hu"
1498                                " already exists, ignoring request\n",
1499                                lu_gp_id);
1500                        spin_unlock(&lu_gps_lock);
1501                        return -EINVAL;
1502                }
1503        }
1504
1505        lu_gp->lu_gp_id = lu_gp_id_tmp;
1506        lu_gp->lu_gp_valid_id = 1;
1507        list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1508        alua_lu_gps_count++;
1509        spin_unlock(&lu_gps_lock);
1510
1511        return 0;
1512}
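
/*
 * Sketch of the intended allocate/set-ID pairing as driven from the
 * configfs layer (the group name is illustrative; an lu_gp_id of 0
 * requests the next free ID):
 *
 *	lu_gp = core_alua_allocate_lu_gp("some_lu_gp", 0);
 *	if (!IS_ERR(lu_gp))
 *		rc = core_alua_set_lu_gp_id(lu_gp, 0);
 */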
1513
1514static struct t10_alua_lu_gp_member *
1515core_alua_allocate_lu_gp_mem(struct se_device *dev)
1516{
1517        struct t10_alua_lu_gp_member *lu_gp_mem;
1518
1519        lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1520        if (!lu_gp_mem) {
1521                pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1522                return ERR_PTR(-ENOMEM);
1523        }
1524        INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1525        spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1526        atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1527
1528        lu_gp_mem->lu_gp_mem_dev = dev;
1529        dev->dev_alua_lu_gp_mem = lu_gp_mem;
1530
1531        return lu_gp_mem;
1532}
1533
1534void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1535{
1536        struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1537        /*
1538         * Once we have reached this point, config_item_put() has
1539         * already been called from target_core_alua_drop_lu_gp().
1540         *
1541         * Here, we remove the *lu_gp from the global list so that
1542         * no associations can be made while we are releasing
1543         * struct t10_alua_lu_gp.
1544         */
1545        spin_lock(&lu_gps_lock);
1546        if (lu_gp->lu_gp_valid_id) {
1547                list_del(&lu_gp->lu_gp_node);
1548                alua_lu_gps_count--;
1549        }
1550        spin_unlock(&lu_gps_lock);
1549        /*
1550         * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1551         * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1552         * released with core_alua_put_lu_gp_from_name()
1553         */
1554        while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1555                cpu_relax();
1556        /*
1557         * Release reference to struct t10_alua_lu_gp * from all associated
1558         * struct se_device.
1559         */
1560        spin_lock(&lu_gp->lu_gp_lock);
1561        list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1562                                &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1563                if (lu_gp_mem->lu_gp_assoc) {
1564                        list_del(&lu_gp_mem->lu_gp_mem_list);
1565                        lu_gp->lu_gp_members--;
1566                        lu_gp_mem->lu_gp_assoc = 0;
1567                }
1568                spin_unlock(&lu_gp->lu_gp_lock);
1569                /*
1571                 * lu_gp_mem is associated with a single
1572                 * struct se_device->dev_alua_lu_gp_mem, and is released when
1573                 * struct se_device is released via core_alua_free_lu_gp_mem().
1574                 *
1575                 * If the passed lu_gp does NOT match the default_lu_gp, assume
1576                 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1577                 */
1578                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1579                if (lu_gp != default_lu_gp)
1580                        __core_alua_attach_lu_gp_mem(lu_gp_mem,
1581                                        default_lu_gp);
1582                else
1583                        lu_gp_mem->lu_gp = NULL;
1584                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1585
1586                spin_lock(&lu_gp->lu_gp_lock);
1587        }
1588        spin_unlock(&lu_gp->lu_gp_lock);
1589
1590        kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1591}
1592
1593void core_alua_free_lu_gp_mem(struct se_device *dev)
1594{
1595        struct t10_alua_lu_gp *lu_gp;
1596        struct t10_alua_lu_gp_member *lu_gp_mem;
1597
1598        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1599        if (!lu_gp_mem)
1600                return;
1601
1602        while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1603                cpu_relax();
1604
1605        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1606        lu_gp = lu_gp_mem->lu_gp;
1607        if (lu_gp) {
1608                spin_lock(&lu_gp->lu_gp_lock);
1609                if (lu_gp_mem->lu_gp_assoc) {
1610                        list_del(&lu_gp_mem->lu_gp_mem_list);
1611                        lu_gp->lu_gp_members--;
1612                        lu_gp_mem->lu_gp_assoc = 0;
1613                }
1614                spin_unlock(&lu_gp->lu_gp_lock);
1615                lu_gp_mem->lu_gp = NULL;
1616        }
1617        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1618
1619        kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1620}
1621
1622struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1623{
1624        struct t10_alua_lu_gp *lu_gp;
1625        struct config_item *ci;
1626
1627        spin_lock(&lu_gps_lock);
1628        list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1629                if (!lu_gp->lu_gp_valid_id)
1630                        continue;
1631                ci = &lu_gp->lu_gp_group.cg_item;
1632                if (!strcmp(config_item_name(ci), name)) {
1633                        atomic_inc(&lu_gp->lu_gp_ref_cnt);
1634                        spin_unlock(&lu_gps_lock);
1635                        return lu_gp;
1636                }
1637        }
1638        spin_unlock(&lu_gps_lock);
1639
1640        return NULL;
1641}
1642
1643void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1644{
1645        spin_lock(&lu_gps_lock);
1646        atomic_dec(&lu_gp->lu_gp_ref_cnt);
1647        spin_unlock(&lu_gps_lock);
1648}
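
/*
 * core_alua_get_lu_gp_by_name() takes a reference that must be dropped
 * with the put helper above once the caller is done, e.g.:
 *
 *	lu_gp = core_alua_get_lu_gp_by_name(name);
 *	if (lu_gp) {
 *		... use lu_gp ...
 *		core_alua_put_lu_gp_from_name(lu_gp);
 *	}
 */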
1649
1650/*
1651 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1652 */
1653void __core_alua_attach_lu_gp_mem(
1654        struct t10_alua_lu_gp_member *lu_gp_mem,
1655        struct t10_alua_lu_gp *lu_gp)
1656{
1657        spin_lock(&lu_gp->lu_gp_lock);
1658        lu_gp_mem->lu_gp = lu_gp;
1659        lu_gp_mem->lu_gp_assoc = 1;
1660        list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1661        lu_gp->lu_gp_members++;
1662        spin_unlock(&lu_gp->lu_gp_lock);
1663}
1664
1665/*
1666 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1667 */
1668void __core_alua_drop_lu_gp_mem(
1669        struct t10_alua_lu_gp_member *lu_gp_mem,
1670        struct t10_alua_lu_gp *lu_gp)
1671{
1672        spin_lock(&lu_gp->lu_gp_lock);
1673        list_del(&lu_gp_mem->lu_gp_mem_list);
1674        lu_gp_mem->lu_gp = NULL;
1675        lu_gp_mem->lu_gp_assoc = 0;
1676        lu_gp->lu_gp_members--;
1677        spin_unlock(&lu_gp->lu_gp_lock);
1678}
1679
1680struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1681                const char *name, int def_group)
1682{
1683        struct t10_alua_tg_pt_gp *tg_pt_gp;
1684
1685        tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1686        if (!tg_pt_gp) {
1687                pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1688                return NULL;
1689        }
1690        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1691        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1692        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1693        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1694        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1695        INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1696                          core_alua_do_transition_tg_pt_work);
1697        tg_pt_gp->tg_pt_gp_dev = dev;
1698        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1699                ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1700        /*
1701         * Enable both explicit and implicit ALUA support by default
1702         */
1703        tg_pt_gp->tg_pt_gp_alua_access_type =
1704                        TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1705        /*
1706         * Set the default Active/NonOptimized Delay in milliseconds
1707         */
1708        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1709        tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1710        tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1711
1712        /*
1713         * Enable all supported states
1714         */
1715        tg_pt_gp->tg_pt_gp_alua_supported_states =
1716            ALUA_T_SUP | ALUA_O_SUP |
1717            ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1718
1719        if (def_group) {
1720                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1721                tg_pt_gp->tg_pt_gp_id =
1722                                dev->t10_alua.alua_tg_pt_gps_counter++;
1723                tg_pt_gp->tg_pt_gp_valid_id = 1;
1724                dev->t10_alua.alua_tg_pt_gps_count++;
1725                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1726                              &dev->t10_alua.tg_pt_gps_list);
1727                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1728        }
1729
1730        return tg_pt_gp;
1731}
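
/*
 * A group allocated with def_group == 0 stays invisible until an ID is
 * assigned; the non-default flow, sketched with an illustrative name
 * (a tg_pt_gp_id of 0 again requests automatic ID selection):
 *
 *	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "port_gp_A", 0);
 *	if (tg_pt_gp)
 *		rc = core_alua_set_tg_pt_gp_id(tg_pt_gp, 0);
 */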
1732
1733int core_alua_set_tg_pt_gp_id(
1734        struct t10_alua_tg_pt_gp *tg_pt_gp,
1735        u16 tg_pt_gp_id)
1736{
1737        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1738        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1739        u16 tg_pt_gp_id_tmp;
1740
1741        /*
1742         * The tg_pt_gp->tg_pt_gp_id may only be set once.
1743         */
1744        if (tg_pt_gp->tg_pt_gp_valid_id) {
1745                pr_warn("ALUA TG PT Group already has a valid ID,"
1746                        " ignoring request\n");
1747                return -EINVAL;
1748        }
1749
1750        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1751        if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1752                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1753                        " 0x0000ffff reached\n");
1754                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1755                /* tg_pt_gp is freed later via its configfs release path */
1756                return -ENOSPC;
1757        }
1758again:
1759        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1760                        dev->t10_alua.alua_tg_pt_gps_counter++;
1761
1762        list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1763                        tg_pt_gp_list) {
1764                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1765                        if (!tg_pt_gp_id)
1766                                goto again;
1767
1768                        pr_err("ALUA Target Port Group ID: %hu already"
1769                                " exists, ignoring request\n", tg_pt_gp_id);
1770                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1771                        return -EINVAL;
1772                }
1773        }
1774
1775        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1776        tg_pt_gp->tg_pt_gp_valid_id = 1;
1777        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1778                        &dev->t10_alua.tg_pt_gps_list);
1779        dev->t10_alua.alua_tg_pt_gps_count++;
1780        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1781
1782        return 0;
1783}
1784
1785void core_alua_free_tg_pt_gp(
1786        struct t10_alua_tg_pt_gp *tg_pt_gp)
1787{
1788        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1789        struct se_lun *lun, *next;
1790
1791        /*
1792         * Once we have reached this point, config_item_put() has already
1793         * been called from target_core_alua_drop_tg_pt_gp().
1794         *
1795         * Here we remove *tg_pt_gp from the global list so that
1796         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1797         * can be made while we are releasing struct t10_alua_tg_pt_gp.
1798         */
1799        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1800        if (tg_pt_gp->tg_pt_gp_valid_id) {
1801                list_del(&tg_pt_gp->tg_pt_gp_list);
1802                dev->t10_alua.alua_tg_pt_gps_count--;
1803        }
1804        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1803
1804        flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1805
1806        /*
1807         * Allow a struct t10_alua_tg_pt_gp * referenced by
1808         * core_alua_get_tg_pt_gp_by_name() in
1809         * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1810         * to be released with core_alua_put_tg_pt_gp_from_name().
1811         */
1812        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1813                cpu_relax();
1814
1815        /*
1816         * Release reference to struct t10_alua_tg_pt_gp from all associated
1817         * struct se_lun.
1818         */
1819        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1820        list_for_each_entry_safe(lun, next,
1821                        &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1822                list_del_init(&lun->lun_tg_pt_gp_link);
1823                tg_pt_gp->tg_pt_gp_members--;
1824
1825                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1826                /*
1827                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1828                 * assume we want to re-associate a given lun with
1829                 * default_tg_pt_gp.
1830                 */
1831                spin_lock(&lun->lun_tg_pt_gp_lock);
1832                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1833                        __target_attach_tg_pt_gp(lun,
1834                                        dev->t10_alua.default_tg_pt_gp);
1835                } else
1836                        lun->lun_tg_pt_gp = NULL;
1837                spin_unlock(&lun->lun_tg_pt_gp_lock);
1838
1839                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1840        }
1841        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1842
1843        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1844}
1845
1846static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1847                struct se_device *dev, const char *name)
1848{
1849        struct t10_alua_tg_pt_gp *tg_pt_gp;
1850        struct config_item *ci;
1851
1852        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1853        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1854                        tg_pt_gp_list) {
1855                if (!tg_pt_gp->tg_pt_gp_valid_id)
1856                        continue;
1857                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1858                if (!strcmp(config_item_name(ci), name)) {
1859                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1860                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1861                        return tg_pt_gp;
1862                }
1863        }
1864        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1865
1866        return NULL;
1867}
1868
1869static void core_alua_put_tg_pt_gp_from_name(
1870        struct t10_alua_tg_pt_gp *tg_pt_gp)
1871{
1872        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1873
1874        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1875        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1876        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1877}
1878
1879static void __target_attach_tg_pt_gp(struct se_lun *lun,
1880                struct t10_alua_tg_pt_gp *tg_pt_gp)
1881{
1882        struct se_dev_entry *se_deve;
1883
1884        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1885
1886        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1887        lun->lun_tg_pt_gp = tg_pt_gp;
1888        list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1889        tg_pt_gp->tg_pt_gp_members++;
1890        spin_lock(&lun->lun_deve_lock);
1891        list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1892                core_scsi3_ua_allocate(se_deve, 0x3f,
1893                                       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1894        spin_unlock(&lun->lun_deve_lock);
1895        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1896}
1897
1898void target_attach_tg_pt_gp(struct se_lun *lun,
1899                struct t10_alua_tg_pt_gp *tg_pt_gp)
1900{
1901        spin_lock(&lun->lun_tg_pt_gp_lock);
1902        __target_attach_tg_pt_gp(lun, tg_pt_gp);
1903        spin_unlock(&lun->lun_tg_pt_gp_lock);
1904}
1905
1906static void __target_detach_tg_pt_gp(struct se_lun *lun,
1907                struct t10_alua_tg_pt_gp *tg_pt_gp)
1908{
1909        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1910
1911        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1912        list_del_init(&lun->lun_tg_pt_gp_link);
1913        tg_pt_gp->tg_pt_gp_members--;
1914        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1915
1916        lun->lun_tg_pt_gp = NULL;
1917}
1918
1919void target_detach_tg_pt_gp(struct se_lun *lun)
1920{
1921        struct t10_alua_tg_pt_gp *tg_pt_gp;
1922
1923        spin_lock(&lun->lun_tg_pt_gp_lock);
1924        tg_pt_gp = lun->lun_tg_pt_gp;
1925        if (tg_pt_gp)
1926                __target_detach_tg_pt_gp(lun, tg_pt_gp);
1927        spin_unlock(&lun->lun_tg_pt_gp_lock);
1928}
1929
1930ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1931{
1932        struct config_item *tg_pt_ci;
1933        struct t10_alua_tg_pt_gp *tg_pt_gp;
1934        ssize_t len = 0;
1935
1936        spin_lock(&lun->lun_tg_pt_gp_lock);
1937        tg_pt_gp = lun->lun_tg_pt_gp;
1938        if (tg_pt_gp) {
1939                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1940                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1941                        " %hu\nTG Port Primary Access State: %s\nTG Port "
1942                        "Primary Access Status: %s\nTG Port Secondary Access"
1943                        " State: %s\nTG Port Secondary Access Status: %s\n",
1944                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1945                        core_alua_dump_state(atomic_read(
1946                                        &tg_pt_gp->tg_pt_gp_alua_access_state)),
1947                        core_alua_dump_status(
1948                                tg_pt_gp->tg_pt_gp_alua_access_status),
1949                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1950                        "Offline" : "None",
1951                        core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1952        }
1953        spin_unlock(&lun->lun_tg_pt_gp_lock);
1954
1955        return len;
1956}
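
/*
 * Sample output of the show routine above (values invented; the state
 * and status strings come from core_alua_dump_state() and
 * core_alua_dump_status()):
 *
 *	TG Port Alias: default_tg_pt_gp
 *	TG Port Group ID: 0
 *	TG Port Primary Access State: Active/Optimized
 *	TG Port Primary Access Status: None
 *	TG Port Secondary Access State: None
 *	TG Port Secondary Access Status: None
 */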
1957
1958ssize_t core_alua_store_tg_pt_gp_info(
1959        struct se_lun *lun,
1960        const char *page,
1961        size_t count)
1962{
1963        struct se_portal_group *tpg = lun->lun_tpg;
1964        /*
1965         * rcu_dereference_raw protected by se_lun->lun_group symlink
1966         * reference to se_device->dev_group.
1967         */
1968        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1969        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1970        unsigned char buf[TG_PT_GROUP_NAME_BUF];
1971        int move = 0;
1972
1973        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
1974            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1975                return -ENODEV;
1976
1977        if (count >= TG_PT_GROUP_NAME_BUF) {
1978                pr_err("ALUA Target Port Group alias too large!\n");
1979                return -EINVAL;
1980        }
1981        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1982        memcpy(buf, page, count);
1983        /*
1984         * Any ALUA target port group alias besides "NULL" means we will be
1985         * making a new group association.
1986         */
1987        if (strcmp(strstrip(buf), "NULL")) {
1988                /*
1989                 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1990                 * struct t10_alua_tg_pt_gp.  This reference is released with
1991                 * core_alua_put_tg_pt_gp_from_name() below.
1992                 */
1993                tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1994                                        strstrip(buf));
1995                if (!tg_pt_gp_new)
1996                        return -ENODEV;
1997        }
1998
1999        spin_lock(&lun->lun_tg_pt_gp_lock);
2000        tg_pt_gp = lun->lun_tg_pt_gp;
2001        if (tg_pt_gp) {
2002                /*
2003                 * Clearing an existing tg_pt_gp association, and replacing
2004                 * with the default_tg_pt_gp.
2005                 */
2006                if (!tg_pt_gp_new) {
2007                        pr_debug("Target_Core_ConfigFS: Moving"
2008                                " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2009                                " alua/%s, ID: %hu back to"
2010                                " default_tg_pt_gp\n",
2011                                tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2012                                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2013                                config_item_name(&lun->lun_group.cg_item),
2014                                config_item_name(
2015                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
2016                                tg_pt_gp->tg_pt_gp_id);
2017
2018                        __target_detach_tg_pt_gp(lun, tg_pt_gp);
2019                        __target_attach_tg_pt_gp(lun,
2020                                        dev->t10_alua.default_tg_pt_gp);
2021                        spin_unlock(&lun->lun_tg_pt_gp_lock);
2022
2023                        return count;
2024                }
2025                __target_detach_tg_pt_gp(lun, tg_pt_gp);
2026                move = 1;
2027        }
2028
2029        __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
2030        spin_unlock(&lun->lun_tg_pt_gp_lock);
2031        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2032                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2033                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2034                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2035                config_item_name(&lun->lun_group.cg_item),
2036                config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2037                tg_pt_gp_new->tg_pt_gp_id);
2038
2039        core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2040        return count;
2041}
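
/*
 * Illustrative configfs usage of the store routine above (the fabric
 * and WWN path components are made up):
 *
 *   # associate a LUN with an existing target port group
 *   echo port_gp_A > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 *   # move it back to default_tg_pt_gp
 *   echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 */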
2042
2043ssize_t core_alua_show_access_type(
2044        struct t10_alua_tg_pt_gp *tg_pt_gp,
2045        char *page)
2046{
2047        if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2048            (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2049                return sprintf(page, "Implicit and Explicit\n");
2050        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2051                return sprintf(page, "Implicit\n");
2052        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2053                return sprintf(page, "Explicit\n");
2054        else
2055                return sprintf(page, "None\n");
2056}
2057
2058ssize_t core_alua_store_access_type(
2059        struct t10_alua_tg_pt_gp *tg_pt_gp,
2060        const char *page,
2061        size_t count)
2062{
2063        unsigned long tmp;
2064        int ret;
2065
2066        ret = kstrtoul(page, 0, &tmp);
2067        if (ret < 0) {
2068                pr_err("Unable to extract alua_access_type\n");
2069                return ret;
2070        }
2071        if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2072                pr_err("Illegal value for alua_access_type:"
2073                                " %lu\n", tmp);
2074                return -EINVAL;
2075        }
2076        if (tmp == 3)
2077                tg_pt_gp->tg_pt_gp_alua_access_type =
2078                        TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2079        else if (tmp == 2)
2080                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2081        else if (tmp == 1)
2082                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2083        else
2084                tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2085
2086        return count;
2087}
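
/*
 * The alua_access_type encodings accepted above:
 *
 *	0 = None
 *	1 = Implicit
 *	2 = Explicit
 *	3 = Implicit and Explicit
 */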
2088
2089ssize_t core_alua_show_nonop_delay_msecs(
2090        struct t10_alua_tg_pt_gp *tg_pt_gp,
2091        char *page)
2092{
2093        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2094}
2095
2096ssize_t core_alua_store_nonop_delay_msecs(
2097        struct t10_alua_tg_pt_gp *tg_pt_gp,
2098        const char *page,
2099        size_t count)
2100{
2101        unsigned long tmp;
2102        int ret;
2103
2104        ret = kstrtoul(page, 0, &tmp);
2105        if (ret < 0) {
2106                pr_err("Unable to extract nonop_delay_msecs\n");
2107                return ret;
2108        }
2109        if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2110                pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2111                        " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2112                        ALUA_MAX_NONOP_DELAY_MSECS);
2113                return -EINVAL;
2114        }
2115        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2116
2117        return count;
2118}
2119
2120ssize_t core_alua_show_trans_delay_msecs(
2121        struct t10_alua_tg_pt_gp *tg_pt_gp,
2122        char *page)
2123{
2124        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2125}
2126
2127ssize_t core_alua_store_trans_delay_msecs(
2128        struct t10_alua_tg_pt_gp *tg_pt_gp,
2129        const char *page,
2130        size_t count)
2131{
2132        unsigned long tmp;
2133        int ret;
2134
2135        ret = kstrtoul(page, 0, &tmp);
2136        if (ret < 0) {
2137                pr_err("Unable to extract trans_delay_msecs\n");
2138                return ret;
2139        }
2140        if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2141                pr_err("Passed trans_delay_msecs: %lu, exceeds"
2142                        " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2143                        ALUA_MAX_TRANS_DELAY_MSECS);
2144                return -EINVAL;
2145        }
2146        tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2147
2148        return count;
2149}
2150
2151ssize_t core_alua_show_implicit_trans_secs(
2152        struct t10_alua_tg_pt_gp *tg_pt_gp,
2153        char *page)
2154{
2155        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2156}
2157
2158ssize_t core_alua_store_implicit_trans_secs(
2159        struct t10_alua_tg_pt_gp *tg_pt_gp,
2160        const char *page,
2161        size_t count)
2162{
2163        unsigned long tmp;
2164        int ret;
2165
2166        ret = kstrtoul(page, 0, &tmp);
2167        if (ret < 0) {
2168                pr_err("Unable to extract implicit_trans_secs\n");
2169                return ret;
2170        }
2171        if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2172                pr_err("Passed implicit_trans_secs: %lu, exceeds"
2173                        " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2174                        ALUA_MAX_IMPLICIT_TRANS_SECS);
2175                return  -EINVAL;
2176        }
2177        tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2178
2179        return count;
2180}
2181
2182ssize_t core_alua_show_preferred_bit(
2183        struct t10_alua_tg_pt_gp *tg_pt_gp,
2184        char *page)
2185{
2186        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2187}
2188
2189ssize_t core_alua_store_preferred_bit(
2190        struct t10_alua_tg_pt_gp *tg_pt_gp,
2191        const char *page,
2192        size_t count)
2193{
2194        unsigned long tmp;
2195        int ret;
2196
2197        ret = kstrtoul(page, 0, &tmp);
2198        if (ret < 0) {
2199                pr_err("Unable to extract preferred ALUA value\n");
2200                return ret;
2201        }
2202        if ((tmp != 0) && (tmp != 1)) {
2203                pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2204                return -EINVAL;
2205        }
2206        tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2207
2208        return count;
2209}
2210
2211ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2212{
2213        return sprintf(page, "%d\n",
2214                atomic_read(&lun->lun_tg_pt_secondary_offline));
2215}
2216
2217ssize_t core_alua_store_offline_bit(
2218        struct se_lun *lun,
2219        const char *page,
2220        size_t count)
2221{
2222        /*
2223         * rcu_dereference_raw protected by se_lun->lun_group symlink
2224         * reference to se_device->dev_group.
2225         */
2226        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2227        unsigned long tmp;
2228        int ret;
2229
2230        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
2231            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2232                return -ENODEV;
2233
2234        ret = kstrtoul(page, 0, &tmp);
2235        if (ret < 0) {
2236                pr_err("Unable to extract alua_tg_pt_offline value\n");
2237                return ret;
2238        }
2239        if ((tmp != 0) && (tmp != 1)) {
2240                pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2241                                tmp);
2242                return -EINVAL;
2243        }
2244
2245        ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2246        if (ret < 0)
2247                return -EINVAL;
2248
2249        return count;
2250}
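
/*
 * Illustrative usage (the configfs path layout is an assumption):
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# go OFFLINE
 *   echo 0 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline	# come back
 *
 * Both writes funnel into core_alua_set_tg_pt_secondary_state() with
 * explicit == 0.
 */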
2251
2252ssize_t core_alua_show_secondary_status(
2253        struct se_lun *lun,
2254        char *page)
2255{
2256        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2257}
2258
2259ssize_t core_alua_store_secondary_status(
2260        struct se_lun *lun,
2261        const char *page,
2262        size_t count)
2263{
2264        unsigned long tmp;
2265        int ret;
2266
2267        ret = kstrtoul(page, 0, &tmp);
2268        if (ret < 0) {
2269                pr_err("Unable to extract alua_tg_pt_status\n");
2270                return ret;
2271        }
2272        if ((tmp != ALUA_STATUS_NONE) &&
2273            (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2274            (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2275                pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2276                                tmp);
2277                return -EINVAL;
2278        }
2279        lun->lun_tg_pt_secondary_stat = (int)tmp;
2280
2281        return count;
2282}
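
/*
 * Accepted alua_tg_pt_status values, matching the ALUA_STATUS_*
 * definitions from target_core_alua.h:
 *
 *	0x00 = ALUA_STATUS_NONE
 *	0x01 = ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG
 *	0x02 = ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA
 */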
2283
2284ssize_t core_alua_show_secondary_write_metadata(
2285        struct se_lun *lun,
2286        char *page)
2287{
2288        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2289}
2290
2291ssize_t core_alua_store_secondary_write_metadata(
2292        struct se_lun *lun,
2293        const char *page,
2294        size_t count)
2295{
2296        unsigned long tmp;
2297        int ret;
2298
2299        ret = kstrtoul(page, 0, &tmp);
2300        if (ret < 0) {
2301                pr_err("Unable to extract alua_tg_pt_write_md\n");
2302                return ret;
2303        }
2304        if ((tmp != 0) && (tmp != 1)) {
2305                pr_err("Illegal value for alua_tg_pt_write_md:"
2306                                " %lu\n", tmp);
2307                return -EINVAL;
2308        }
2309        lun->lun_tg_pt_secondary_write_md = (int)tmp;
2310
2311        return count;
2312}
2313
2314int core_setup_alua(struct se_device *dev)
2315{
2316        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
2317            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2318                struct t10_alua_lu_gp_member *lu_gp_mem;
2319
2320                /*
2321                 * Associate this struct se_device with the default ALUA
2322                 * LUN Group.
2323                 */
2324                lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2325                if (IS_ERR(lu_gp_mem))
2326                        return PTR_ERR(lu_gp_mem);
2327
2328                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2329                __core_alua_attach_lu_gp_mem(lu_gp_mem,
2330                                default_lu_gp);
2331                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2332
2333                pr_debug("%s: Adding to default ALUA LU Group:"
2334                        " core/alua/lu_gps/default_lu_gp\n",
2335                        dev->transport->name);
2336        }
2337
2338        return 0;
2339}
2340