linux/drivers/target/target_core_alua.c

/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 * emulation.
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary);
static int core_alua_set_tg_pt_secondary_state(
                struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
                struct se_port *port, int explicit, int offline);

static char *core_alua_dump_state(int state);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
        spin_lock(&dev->t10_alua.lba_map_lock);
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
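
/*
 * A sketch of the user data segment referral descriptor built by the loop
 * above, following the sbc3r35 format cited in the function header
 * (offsets relative to the start of each descriptor; illustrative, not
 * normative):
 *
 *   byte 3       : number of target port group descriptors (the desc_num slot)
 *   bytes 4-11   : FIRST USER DATA SEGMENT LBA (big-endian)
 *   bytes 12-19  : LAST USER DATA SEGMENT LBA (big-endian)
 *   bytes 20...  : one 4-byte entry per port group:
 *                  byte 0 = asymmetric access state (low nibble),
 *                  byte 1 = reserved, bytes 2-3 = port group ID (big-endian)
 *
 * rd_len accumulates 20 bytes per descriptor plus 4 per port group entry,
 * and lands in bytes 2-3 of the 4-byte parameter data header.
 */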

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_port *port;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over RESERVED area to first Target port group descriptor
         * depending on the PARAMETER DATA FORMAT type.
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
                 * based on tg_pt_gp_members count will fit into the response payload.
                 * Otherwise, bump rd_len to let the initiator know we have exceeded
                 * the allocation length and the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
                buf[off++] |= (atomic_read(
                        &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
                buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
                                tg_pt_gp_mem_list) {
                        port = tg_pt_gp_mem->tg_pt;
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
                        buf[off++] = (port->sep_rtpi & 0xff);
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the application
                 * client to use as a base for its transition timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
                 * this CDB was received upon to determine this value individually
                 * for the ALUA target port group.
                 */
                port = cmd->se_lun->lun_sep;
                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
                if (tg_pt_gp_mem) {
                        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
                        if (tg_pt_gp)
                                buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                }
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}
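
/*
 * A sketch of the target port group descriptor emitted above, assuming the
 * spc4r17 layout (8 bytes per group plus 4 bytes per member port):
 *
 *   byte 0   : PREF (bit 7) | asymmetric access state (low nibble)
 *   byte 1   : supported access states bitmap (the ALUA_*_SUP bits)
 *   byte 2-3 : TARGET PORT GROUP ID (big-endian)
 *   byte 4   : reserved
 *   byte 5   : STATUS CODE
 *   byte 6   : vendor specific
 *   byte 7   : TARGET PORT COUNT
 *   then, per member port: two obsolete bytes followed by the RELATIVE
 *   TARGET PORT IDENTIFIER (big-endian)
 *
 * With the extended header (cdb[1] bit 5 set), payload byte 4 is 0x10 and
 * byte 5 carries the IMPLICIT TRANSITION TIME in seconds.
 */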

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_port *port, *l_port = cmd->se_lun->lun_sep;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (!l_port)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
        l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;

        ptr = &buf[4]; /* Skip over RESERVED area in header */

        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state,
                                                valid_states, &primary);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic();

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_port, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic();
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(port, &dev->dev_sep_list,
                                                        sep_list) {
                                if (port->sep_rtpi != rtpi)
                                        continue;

                                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;

                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                tg_pt_gp_mem, port, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, GOOD);
        return rc;
}
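
/*
 * For reference, each 4-byte parameter list entry consumed per iteration of
 * the while loop above looks like (a sketch, per the spc4r17 reference cited
 * in the function header):
 *
 *   byte 0   : ASYMMETRIC ACCESS STATE (low nibble)
 *   byte 1   : reserved
 *   byte 2-3 : TARGET PORT GROUP ID for a primary state, or the RELATIVE
 *              TARGET PORT IDENTIFIER for a secondary state (big-endian)
 *
 * Entries start at buf[4], after the 4-byte reserved parameter list header.
 */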

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
        /*
         * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
         * The ALUA additional sense code qualifier (ASCQ) is determined
         * by the ALUA primary or secondary access state.
         */
        pr_debug("[%s]: ALUA TG Port not available, "
                "SenseKey: NOT_READY, ASC/ASCQ: "
                "0x04/0x%02x\n",
                cmd->se_tfo->get_fabric_name(), alua_ascq);

        cmd->scsi_asc = 0x04;
        cmd->scsi_ascq = alua_ascq;
}
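
/*
 * The alua_ascq values passed to set_ascq() by the helpers below come from
 * target_core_alua.h and map onto the SPC-4 ASC 0x04 ("LOGICAL UNIT NOT
 * ACCESSIBLE") qualifiers, e.g. asymmetric access state transition, target
 * port in standby, and target port unavailable.
 */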

static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
        int nonop_delay_msecs)
{
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
         * later to determine if processing of this cmd needs to be
         * temporarily delayed for the Active/NonOptimized primary access state.
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id !=
                            tg_pt_gp->tg_pt_gp_id)
                                continue;
                        switch (map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                                return 1;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}
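
/*
 * Worked example for the segment_mult branch above, with illustrative
 * numbers: segment_size = 64 and segment_mult = 2 give a repeating window
 * of 128 LBAs. For lba = 200, do_div() yields start_lba = 200 % 128 = 72,
 * which matches a map with first_lba = 64 (last_lba = 64 + 64 - 1 = 127),
 * so that map's members decide the access state for this stretch and the
 * scan advances by segment_size to lba = 264.
 */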

static inline int core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
         * spc4r17 section 5.9.2.4.4
         */
        switch (cdb[0]) {
        case INQUIRY:
        case LOG_SELECT:
        case LOG_SENSE:
        case MODE_SELECT:
        case MODE_SENSE:
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
        case READ_CAPACITY:
                return 0;
        case SERVICE_ACTION_IN:
                switch (cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
         * spc4r17 section 5.9.2.4.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
         * spc4r17 section 5.9.2.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                return 1;
        }

        return 0;
}

/*
 * Returns 0 if the command may proceed in the current ALUA access state,
 * TCM_CHECK_CONDITION_NOT_READY if the LUN is not accessible (check
 * condition/not ready), or TCM_INVALID_CDB_FIELD on an unknown access
 * state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct se_port *port = lun->lun_sep;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        int out_alua_state, nonop_delay_msecs;

        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;

        if (!port)
                return 0;
        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
                return TCM_CHECK_CONDITION_NOT_READY;
        }
        /*
         * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
         * ALUA target port group, to obtain the current ALUA access state.
         * Otherwise look for the underlying struct se_device association with
         * an ALUA logical unit group.
         */
        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
        if (!tg_pt_gp_mem)
                return 0;

        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
        out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP.
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (core_alua_state_standby(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (core_alua_state_unavailable(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (core_alua_state_transition(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_port->sep_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                return TCM_INVALID_CDB_FIELD;
        }

        return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary)
{
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
         * defined as primary target port asymmetric access states.
         */
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                if (!(valid & ALUA_AO_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                if (!(valid & ALUA_AN_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (!(valid & ALUA_S_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (!(valid & ALUA_U_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (!(valid & ALUA_LBD_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_OFFLINE:
                /*
                 * OFFLINE state is defined as a secondary target port
                 * asymmetric access state.
                 */
                if (!(valid & ALUA_O_SUP))
                        goto not_supported;
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                /*
                 * Transitioning is set internally, and
                 * cannot be selected manually.
                 */
                goto not_supported;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
        }

        return 0;

not_supported:
        pr_err("ALUA access state %s not supported\n",
               core_alua_dump_state(state));
        return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                return "Active/Optimized";
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                return "Active/NonOptimized";
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                return "LBA Dependent";
        case ALUA_ACCESS_STATE_STANDBY:
                return "Standby";
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                return "Unavailable";
        case ALUA_ACCESS_STATE_OFFLINE:
                return "Offline";
        case ALUA_ACCESS_STATE_TRANSITION:
                return "Transitioning";
        default:
                return "Unknown";
        }

        return NULL;
}

char *core_alua_dump_status(int status)
{
        switch (status) {
        case ALUA_STATUS_NONE:
                return "None";
        case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
                return "Altered by Explicit STPG";
        case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
                return "Altered by Implicit ALUA";
        default:
                return "Unknown";
        }

        return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
        struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
                return 0;
        if (in_interrupt())
                return 0;
        /*
         * The ALUA Active/NonOptimized access state delay can be disabled
         * via configfs with a value of zero
         */
        if (!cmd->alua_nonop_delay)
                return 0;
        /*
         * struct se_cmd->alua_nonop_delay gets set by a target port group
         * defined interval in core_alua_state_nonoptimized()
         */
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
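
/*
 * A minimal usage sketch, assuming a hypothetical fabric driver's queueing
 * path (example_fabric_queue_data_in() is illustrative, not part of this
 * file):
 *
 *      static int example_fabric_queue_data_in(struct se_cmd *cmd)
 *      {
 *              core_alua_check_nonop_delay(cmd);
 *              return 0;  // then hand cmd back to the transport as usual
 *      }
 *
 * The call is a no-op unless core_alua_state_nonoptimized() flagged the
 * command with SCF_ALUA_NON_OPTIMIZED and set a non-zero alua_nonop_delay.
 */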

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 * held.
 */
static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
        u32 md_buf_len)
{
        struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        int ret;

        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
        ret = kernel_write(file, md_buf, md_buf_len, 0);
        if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
        fput(file);
        return (ret < 0) ? -EIO : 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char path[ALUA_METADATA_PATH_LEN];
        int len, rc;

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);

        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
                        tg_pt_gp->tg_pt_gp_alua_pending_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);

        snprintf(path, ALUA_METADATA_PATH_LEN,
                "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
                config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);
        return rc;
}
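
/*
 * With illustrative values, the file written above (at
 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name>) contains:
 *
 *      tg_pt_gp_id=1
 *      alua_access_state=0x00
 *      alua_access_status=0x01
 *
 * mirroring the snprintf() format used to fill md_buf; note the state
 * recorded is the pending state, since this runs while the group is still
 * in transition.
 */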

static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
                struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        struct se_dev_entry *se_deve;
        struct se_lun_acl *lacl;
        struct se_port *port;
        struct t10_alua_tg_pt_gp_member *mem;
        bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
                                tg_pt_gp_mem_list) {
                port = mem->tg_pt;
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition for the initiator port associated with every I_T
                 * nexus with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED.
                 *
                 * After an explicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED for the initiator port associated with
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command was received.
                 */
                atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
                smp_mb__after_atomic();
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock_bh(&port->sep_alua_lock);
                list_for_each_entry(se_deve, &port->sep_alua_list,
                                        alua_port_list) {
                        lacl = se_deve->se_lun_acl;
                        /*
                         * se_deve->se_lun_acl pointer may be NULL for an
                         * entry created without explicit Node+MappedLUN ACLs
                         */
                        if (!lacl)
                                continue;

                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
                            (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_port == port))
                                continue;

                        core_scsi3_ua_allocate(lacl->se_lun_nacl,
                                se_deve->mapped_lun, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
                spin_unlock_bh(&port->sep_alua_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
                smp_mb__after_atomic();
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
         * to struct file.
         *
         * Note that there is the case where we do not want to update the
         * metadata when the saved metadata is being parsed in userspace
         * when setting the existing port access state and access status.
         *
         * Also note that the failure to write out the ALUA metadata to
         * struct file does NOT affect the actual ALUA transition.
         */
        if (tg_pt_gp->tg_pt_gp_write_metadata) {
                mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
                core_alua_update_tpg_primary_metadata(tg_pt_gp);
                mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
        }
        /*
         * Set the current primary ALUA access state to the requested new state
         */
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                   tg_pt_gp->tg_pt_gp_alua_pending_state);

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " from primary access state %s to %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
        smp_mb__after_atomic();
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (tg_pt_gp->tg_pt_gp_transition_complete)
                complete(tg_pt_gp->tg_pt_gp_transition_complete);
}

static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
{
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        DECLARE_COMPLETION_ONSTACK(wait);

        /* Nothing to be done here */
        if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
                return 0;

        if (new_state == ALUA_ACCESS_STATE_TRANSITION)
                return -EAGAIN;

        /*
         * Flush any pending transitions
         */
        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
            atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
            ALUA_ACCESS_STATE_TRANSITION) {
                /* Just in case */
                tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
                return 0;
        }

        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
        tg_pt_gp->tg_pt_gp_alua_previous_state =
                atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                        ALUA_ACCESS_STATE_TRANSITION);
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        /*
         * Check for the optional ALUA primary state transition delay
         */
        if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

        /*
         * Take a reference for workqueue item
         */
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
        smp_mb__after_atomic();
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
                unsigned long transition_tmo;

                transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work,
                                   transition_tmo);
        } else {
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work, 0);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
        }

        return 0;
}
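
/*
 * Summary of the sequence implemented above: the group is moved to
 * ALUA_ACCESS_STATE_TRANSITION with the requested state saved in
 * tg_pt_gp_alua_pending_state, and the delayed work then runs
 * core_alua_do_transition_tg_pt_work(), which posts ASYMMETRIC ACCESS
 * STATE CHANGED unit attentions, optionally writes the primary metadata,
 * and finally commits the pending state as the current access state.
 * Explicit STPG callers block on the on-stack completion; implicit
 * transitions with tg_pt_gp_implicit_trans_secs set are queued with that
 * delay instead.
 */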

int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
        struct se_port *l_port,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
{
        struct se_device *dev;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;

        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
                return -EINVAL;

        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
        smp_mb__after_atomic();
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
         * we only do transition on the passed *l_tg_pt_gp, and not
         * on all of the matching target port group IDs in default_lu_gp.
         */
        if (!lu_gp->lu_gp_id) {
                /*
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
                l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec(&lu_gp->lu_gp_ref_cnt);
                smp_mb__after_atomic();
                return rc;
        }
        /*
         * For all other LU groups aside from 'default_lu_gp', walk all of
         * the associated storage objects looking for a matching target port
         * group ID from the local target port group.
         */
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
                                lu_gp_mem_list) {

                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
                smp_mb__after_atomic();
                spin_unlock(&lu_gp->lu_gp_lock);

                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
                                &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {

                        if (!tg_pt_gp->tg_pt_gp_valid_id)
                                continue;
                        /*
                         * If the target port asymmetric access state is
                         * changed for any target port group accessible via
                         * a logical unit within a LU group, the asymmetric
                         * access states for the same target port group
                         * accessible via other logical units in that LU
                         * group will also change.
                         */
                        if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
                                continue;

                        if (l_tg_pt_gp == tg_pt_gp) {
                                tg_pt_gp->tg_pt_gp_alua_port = l_port;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
                                tg_pt_gp->tg_pt_gp_alua_port = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        smp_mb__after_atomic();
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
                         */
                        rc = core_alua_do_transition_tg_pt(tg_pt_gp,
                                        new_state, explicit);

                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        smp_mb__after_atomic();
                        if (rc)
                                break;
                }
                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
                smp_mb__after_atomic();
        }
        spin_unlock(&lu_gp->lu_gp_lock);

        if (!rc) {
                pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
                         " Group IDs: %hu %s transition to primary state: %s\n",
                         config_item_name(&lu_gp->lu_gp_group.cg_item),
                         l_tg_pt_gp->tg_pt_gp_id,
                         (explicit) ? "explicit" : "implicit",
                         core_alua_dump_state(new_state));
        }

        atomic_dec(&lu_gp->lu_gp_ref_cnt);
        smp_mb__after_atomic();
        return rc;
}

/*
 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
 */
static int core_alua_update_tpg_secondary_metadata(
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
        struct se_port *port)
{
        unsigned char *md_buf;
        struct se_portal_group *se_tpg = port->sep_tpg;
        char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
        int len, rc;

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);
        memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

        len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
                        se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

        if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
                snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
                                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&port->sep_tg_pt_secondary_offline),
                        port->sep_tg_pt_secondary_stat);

        snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
                        se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
                        port->sep_lun->unpacked_lun);

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);

        return rc;
}
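
/*
 * With illustrative values, the file written above (at
 * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<unpacked_lun>) contains:
 *
 *      alua_tg_pt_offline=1
 *      alua_tg_pt_status=0x01
 *
 * where the status byte is whichever ALUA_STATUS_* value
 * core_alua_set_tg_pt_secondary_state() assigned below.
 */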
1310
1311static int core_alua_set_tg_pt_secondary_state(
1312        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1313        struct se_port *port,
1314        int explicit,
1315        int offline)
1316{
1317        struct t10_alua_tg_pt_gp *tg_pt_gp;
1318        int trans_delay_msecs;
1319
1320        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1321        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1322        if (!tg_pt_gp) {
1323                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1324                pr_err("Unable to complete secondary state"
1325                                " transition\n");
1326                return -EINVAL;
1327        }
1328        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1329        /*
1330         * Set the secondary ALUA target port access state to OFFLINE
1331         * or release the previously secondary state for struct se_port
1332         */
1333        if (offline)
1334                atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1335        else
1336                atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1337
1338        port->sep_tg_pt_secondary_stat = (explicit) ?
1339                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1340                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1341
1342        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1343                " to secondary access state: %s\n", (explicit) ? "explicit" :
1344                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1345                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1346
1347        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1348        /*
1349         * Do the optional transition delay after we set the secondary
1350         * ALUA access state.
1351         */
1352        if (trans_delay_msecs != 0)
1353                msleep_interruptible(trans_delay_msecs);
1354        /*
1355         * See if we need to update the ALUA fabric port metadata for
1356         * secondary state and status
1357         */
1358        if (port->sep_tg_pt_secondary_write_md) {
1359                mutex_lock(&port->sep_tg_pt_md_mutex);
1360                core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
1361                mutex_unlock(&port->sep_tg_pt_md_mutex);
1362        }
1363
1364        return 0;
1365}
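
/*
 * Assumed typical call path (see core_alua_store_offline_bit() below):
 * writing "1" to a LUN's alua_tg_pt_offline configfs attribute reaches
 * this function with explicit = 0 and offline = 1, e.g.
 *
 *   core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, lun->lun_sep, 0, 1);
 */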
1366
1367struct t10_alua_lba_map *
1368core_alua_allocate_lba_map(struct list_head *list,
1369                           u64 first_lba, u64 last_lba)
1370{
1371        struct t10_alua_lba_map *lba_map;
1372
1373        lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1374        if (!lba_map) {
1375                pr_err("Unable to allocate struct t10_alua_lba_map\n");
1376                return ERR_PTR(-ENOMEM);
1377        }
1378        INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1379        lba_map->lba_map_first_lba = first_lba;
1380        lba_map->lba_map_last_lba = last_lba;
1381
1382        list_add_tail(&lba_map->lba_map_list, list);
1383        return lba_map;
1384}
1385
1386int
1387core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1388                               int pg_id, int state)
1389{
1390        struct t10_alua_lba_map_member *lba_map_mem;
1391
1392        list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1393                            lba_map_mem_list) {
1394                if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1395                        pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1396                        return -EINVAL;
1397                }
1398        }
1399
1400        lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1401        if (!lba_map_mem) {
1402                pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1403                return -ENOMEM;
1404        }
1405        lba_map_mem->lba_map_mem_alua_state = state;
1406        lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1407
1408        list_add_tail(&lba_map_mem->lba_map_mem_list,
1409                      &lba_map->lba_map_mem_list);
1410        return 0;
1411}
1412
1413void
1414core_alua_free_lba_map(struct list_head *lba_list)
1415{
1416        struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1417        struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1418
1419        list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1420                                 lba_map_list) {
1421                list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1422                                         &lba_map->lba_map_mem_list,
1423                                         lba_map_mem_list) {
1424                        list_del(&lba_map_mem->lba_map_mem_list);
1425                        kmem_cache_free(t10_alua_lba_map_mem_cache,
1426                                        lba_map_mem);
1427                }
1428                list_del(&lba_map->lba_map_list);
1429                kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1430        }
1431}
1432
1433void
1434core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1435                      int segment_size, int segment_mult)
1436{
1437        struct list_head old_lba_map_list;
1438        struct t10_alua_tg_pt_gp *tg_pt_gp;
1439        int activate = 0, supported;
1440
1441        INIT_LIST_HEAD(&old_lba_map_list);
1442        spin_lock(&dev->t10_alua.lba_map_lock);
1443        dev->t10_alua.lba_map_segment_size = segment_size;
1444        dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1445        list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1446        if (lba_map_list) {
1447                list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1448                activate = 1;
1449        }
1450        spin_unlock(&dev->t10_alua.lba_map_lock);
1451        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1452        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1453                            tg_pt_gp_list) {
1454
1455                if (!tg_pt_gp->tg_pt_gp_valid_id)
1456                        continue;
1457                supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1458                if (activate)
1459                        supported |= ALUA_LBD_SUP;
1460                else
1461                        supported &= ~ALUA_LBD_SUP;
1462                tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1463        }
1464        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1465        core_alua_free_lba_map(&old_lba_map_list);
1466}
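
/*
 * Minimal sketch of building and installing a referrals LBA map with the
 * three helpers above (illustrative only; the LBA range, group IDs and
 * segment geometry are assumed example values):
 *
 *   LIST_HEAD(lba_list);
 *   struct t10_alua_lba_map *map;
 *
 *   map = core_alua_allocate_lba_map(&lba_list, 0, 0xffff);
 *   if (!IS_ERR(map)) {
 *           core_alua_allocate_lba_map_mem(map, 1,
 *                           ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *           core_alua_allocate_lba_map_mem(map, 2,
 *                           ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED);
 *           core_alua_set_lba_map(dev, &lba_list, 512, 8);
 *   }
 *
 * Passing a NULL lba_map_list to core_alua_set_lba_map() clears the map
 * and drops ALUA_LBD_SUP from every valid target port group.
 */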
1467
1468struct t10_alua_lu_gp *
1469core_alua_allocate_lu_gp(const char *name, int def_group)
1470{
1471        struct t10_alua_lu_gp *lu_gp;
1472
1473        lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1474        if (!lu_gp) {
1475                pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1476                return ERR_PTR(-ENOMEM);
1477        }
1478        INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1479        INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1480        spin_lock_init(&lu_gp->lu_gp_lock);
1481        atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1482
1483        if (def_group) {
1484                lu_gp->lu_gp_id = alua_lu_gps_counter++;
1485                lu_gp->lu_gp_valid_id = 1;
1486                alua_lu_gps_count++;
1487        }
1488
1489        return lu_gp;
1490}
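
/*
 * Assumed setup-time usage, mirroring target_core_configfs.c: the single
 * default group is created once at module init with def_group = 1, e.g.
 *
 *   default_lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
 *   if (IS_ERR(default_lu_gp))
 *           return PTR_ERR(default_lu_gp);
 */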
1491
1492int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1493{
1494        struct t10_alua_lu_gp *lu_gp_tmp;
1495        u16 lu_gp_id_tmp;
1496        /*
1497         * The lu_gp->lu_gp_id may only be set once.
1498         */
1499        if (lu_gp->lu_gp_valid_id) {
1500                pr_warn("ALUA LU Group already has a valid ID,"
1501                        " ignoring request\n");
1502                return -EINVAL;
1503        }
1504
1505        spin_lock(&lu_gps_lock);
1506        if (alua_lu_gps_count == 0x0000ffff) {
1507                pr_err("Maximum ALUA alua_lu_gps_count:"
1508                                " 0x0000ffff reached\n");
1509                spin_unlock(&lu_gps_lock);
1510                kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1511                return -ENOSPC;
1512        }
1513again:
1514        lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1515                                alua_lu_gps_counter++;
1516
1517        list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1518                if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1519                        if (!lu_gp_id)
1520                                goto again;
1521
1522                        pr_warn("ALUA Logical Unit Group ID: %hu"
1523                                " already exists, ignoring request\n",
1524                                lu_gp_id);
1525                        spin_unlock(&lu_gps_lock);
1526                        return -EINVAL;
1527                }
1528        }
1529
1530        lu_gp->lu_gp_id = lu_gp_id_tmp;
1531        lu_gp->lu_gp_valid_id = 1;
1532        list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1533        alua_lu_gps_count++;
1534        spin_unlock(&lu_gps_lock);
1535
1536        return 0;
1537}
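
/*
 * Illustrative ID assignment (values assumed): a lu_gp_id of 0 lets the
 * core auto-allocate from alua_lu_gps_counter, while a non-zero ID
 * requests that exact value and fails with -EINVAL if it is taken:
 *
 *   rc = core_alua_set_lu_gp_id(lu_gp, 0);    (auto-allocate)
 *   rc = core_alua_set_lu_gp_id(lu_gp, 16);   (request ID 16)
 */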
1538
1539static struct t10_alua_lu_gp_member *
1540core_alua_allocate_lu_gp_mem(struct se_device *dev)
1541{
1542        struct t10_alua_lu_gp_member *lu_gp_mem;
1543
1544        lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1545        if (!lu_gp_mem) {
1546                pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1547                return ERR_PTR(-ENOMEM);
1548        }
1549        INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1550        spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1551        atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1552
1553        lu_gp_mem->lu_gp_mem_dev = dev;
1554        dev->dev_alua_lu_gp_mem = lu_gp_mem;
1555
1556        return lu_gp_mem;
1557}
1558
1559void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1560{
1561        struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1562        /*
1563         * Once we have reached this point, config_item_put() has
1564         * already been called from target_core_alua_drop_lu_gp().
1565         *
1566         * Here, we remove the *lu_gp from the global list so that
1567         * no associations can be made while we are releasing
1568         * struct t10_alua_lu_gp.
1569         */
1570        spin_lock(&lu_gps_lock);
1571        list_del(&lu_gp->lu_gp_node);
1572        alua_lu_gps_count--;
1573        spin_unlock(&lu_gps_lock);
1574        /*
1575         * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1576         * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1577         * released with core_alua_put_lu_gp_from_name()
1578         */
1579        while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1580                cpu_relax();
1581        /*
1582         * Release reference to struct t10_alua_lu_gp * from all associated
1583         * struct se_device.
1584         */
1585        spin_lock(&lu_gp->lu_gp_lock);
1586        list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1587                                &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1588                if (lu_gp_mem->lu_gp_assoc) {
1589                        list_del(&lu_gp_mem->lu_gp_mem_list);
1590                        lu_gp->lu_gp_members--;
1591                        lu_gp_mem->lu_gp_assoc = 0;
1592                }
1593                spin_unlock(&lu_gp->lu_gp_lock);
1594                /*
1595                 * lu_gp_mem is associated with a single
1596                 * struct se_device->dev_alua_lu_gp_mem, and is released when
1597                 * struct se_device is released via
1598                 * core_alua_free_lu_gp_mem().
1599                 *
1600                 * If the passed lu_gp does NOT match the default_lu_gp, assume
1601                 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1602                 */
1603                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1604                if (lu_gp != default_lu_gp)
1605                        __core_alua_attach_lu_gp_mem(lu_gp_mem,
1606                                        default_lu_gp);
1607                else
1608                        lu_gp_mem->lu_gp = NULL;
1609                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1610
1611                spin_lock(&lu_gp->lu_gp_lock);
1612        }
1613        spin_unlock(&lu_gp->lu_gp_lock);
1614
1615        kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1616}
1617
1618void core_alua_free_lu_gp_mem(struct se_device *dev)
1619{
1620        struct t10_alua_lu_gp *lu_gp;
1621        struct t10_alua_lu_gp_member *lu_gp_mem;
1622
1623        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1624        if (!lu_gp_mem)
1625                return;
1626
1627        while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1628                cpu_relax();
1629
1630        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1631        lu_gp = lu_gp_mem->lu_gp;
1632        if (lu_gp) {
1633                spin_lock(&lu_gp->lu_gp_lock);
1634                if (lu_gp_mem->lu_gp_assoc) {
1635                        list_del(&lu_gp_mem->lu_gp_mem_list);
1636                        lu_gp->lu_gp_members--;
1637                        lu_gp_mem->lu_gp_assoc = 0;
1638                }
1639                spin_unlock(&lu_gp->lu_gp_lock);
1640                lu_gp_mem->lu_gp = NULL;
1641        }
1642        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1643
1644        kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1645}
1646
1647struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1648{
1649        struct t10_alua_lu_gp *lu_gp;
1650        struct config_item *ci;
1651
1652        spin_lock(&lu_gps_lock);
1653        list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1654                if (!lu_gp->lu_gp_valid_id)
1655                        continue;
1656                ci = &lu_gp->lu_gp_group.cg_item;
1657                if (!strcmp(config_item_name(ci), name)) {
1658                        atomic_inc(&lu_gp->lu_gp_ref_cnt);
1659                        spin_unlock(&lu_gps_lock);
1660                        return lu_gp;
1661                }
1662        }
1663        spin_unlock(&lu_gps_lock);
1664
1665        return NULL;
1666}
1667
1668void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1669{
1670        spin_lock(&lu_gps_lock);
1671        atomic_dec(&lu_gp->lu_gp_ref_cnt);
1672        spin_unlock(&lu_gps_lock);
1673}
1674
1675/*
1676 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1677 */
1678void __core_alua_attach_lu_gp_mem(
1679        struct t10_alua_lu_gp_member *lu_gp_mem,
1680        struct t10_alua_lu_gp *lu_gp)
1681{
1682        spin_lock(&lu_gp->lu_gp_lock);
1683        lu_gp_mem->lu_gp = lu_gp;
1684        lu_gp_mem->lu_gp_assoc = 1;
1685        list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1686        lu_gp->lu_gp_members++;
1687        spin_unlock(&lu_gp->lu_gp_lock);
1688}
1689
1690/*
1691 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1692 */
1693void __core_alua_drop_lu_gp_mem(
1694        struct t10_alua_lu_gp_member *lu_gp_mem,
1695        struct t10_alua_lu_gp *lu_gp)
1696{
1697        spin_lock(&lu_gp->lu_gp_lock);
1698        list_del(&lu_gp_mem->lu_gp_mem_list);
1699        lu_gp_mem->lu_gp = NULL;
1700        lu_gp_mem->lu_gp_assoc = 0;
1701        lu_gp->lu_gp_members--;
1702        spin_unlock(&lu_gp->lu_gp_lock);
1703}
1704
1705struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1706                const char *name, int def_group)
1707{
1708        struct t10_alua_tg_pt_gp *tg_pt_gp;
1709
1710        tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1711        if (!tg_pt_gp) {
1712                pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1713                return NULL;
1714        }
1715        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1716        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1717        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1718        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1719        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1720        INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1721                          core_alua_do_transition_tg_pt_work);
1722        tg_pt_gp->tg_pt_gp_dev = dev;
1723        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1724                ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1725        /*
1726         * Enable both explicit and implicit ALUA support by default
1727         */
1728        tg_pt_gp->tg_pt_gp_alua_access_type =
1729                        TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1730        /*
1731         * Set the default Active/NonOptimized Delay in milliseconds
1732         */
1733        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1734        tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1735        tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1736
1737        /*
1738         * Enable all supported states
1739         */
1740        tg_pt_gp->tg_pt_gp_alua_supported_states =
1741            ALUA_T_SUP | ALUA_O_SUP |
1742            ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1743
1744        if (def_group) {
1745                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1746                tg_pt_gp->tg_pt_gp_id =
1747                                dev->t10_alua.alua_tg_pt_gps_counter++;
1748                tg_pt_gp->tg_pt_gp_valid_id = 1;
1749                dev->t10_alua.alua_tg_pt_gps_count++;
1750                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1751                              &dev->t10_alua.tg_pt_gps_list);
1752                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1753        }
1754
1755        return tg_pt_gp;
1756}
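
/*
 * Assumed usage from the device setup path (mirrors target_core_device.c):
 * every se_device gets a default group created with def_group = 1, e.g.
 *
 *   dev->t10_alua.default_tg_pt_gp =
 *           core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
 *   if (!dev->t10_alua.default_tg_pt_gp)
 *           return -ENOMEM;
 *
 * Note this helper returns NULL on failure, unlike its lu_gp counterpart
 * which returns ERR_PTR(-ENOMEM).
 */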
1757
1758int core_alua_set_tg_pt_gp_id(
1759        struct t10_alua_tg_pt_gp *tg_pt_gp,
1760        u16 tg_pt_gp_id)
1761{
1762        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1763        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1764        u16 tg_pt_gp_id_tmp;
1765
1766        /*
1767         * The tg_pt_gp->tg_pt_gp_id may only be set once.
1768         */
1769        if (tg_pt_gp->tg_pt_gp_valid_id) {
1770                pr_warn("ALUA TG PT Group already has a valid ID,"
1771                        " ignoring request\n");
1772                return -EINVAL;
1773        }
1774
1775        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1776        if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1777                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1778                        " 0x0000ffff reached\n");
1779                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1780                kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1781                return -ENOSPC;
1782        }
1783again:
1784        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1785                        dev->t10_alua.alua_tg_pt_gps_counter++;
1786
1787        list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1788                        tg_pt_gp_list) {
1789                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1790                        if (!tg_pt_gp_id)
1791                                goto again;
1792
1793                        pr_err("ALUA Target Port Group ID: %hu already"
1794                                " exists, ignoring request\n", tg_pt_gp_id);
1795                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1796                        return -EINVAL;
1797                }
1798        }
1799
1800        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1801        tg_pt_gp->tg_pt_gp_valid_id = 1;
1802        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1803                        &dev->t10_alua.tg_pt_gps_list);
1804        dev->t10_alua.alua_tg_pt_gps_count++;
1805        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1806
1807        return 0;
1808}
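
/*
 * ID semantics match core_alua_set_lu_gp_id() above: 0 auto-allocates
 * from the per-device alua_tg_pt_gps_counter, non-zero requests a
 * specific ID. Illustrative only:
 *
 *   rc = core_alua_set_tg_pt_gp_id(tg_pt_gp, 0);
 */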
1809
1810struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1811        struct se_port *port)
1812{
1813        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1814
1815        tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1816                                GFP_KERNEL);
1817        if (!tg_pt_gp_mem) {
1818                pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1819                return ERR_PTR(-ENOMEM);
1820        }
1821        INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1822        spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1823        atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1824
1825        tg_pt_gp_mem->tg_pt = port;
1826        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1827
1828        return tg_pt_gp_mem;
1829}
1830
1831void core_alua_free_tg_pt_gp(
1832        struct t10_alua_tg_pt_gp *tg_pt_gp)
1833{
1834        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1835        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1836
1837        /*
1838         * Once we have reached this point, config_item_put() has already
1839         * been called from target_core_alua_drop_tg_pt_gp().
1840         *
1841         * Here we remove *tg_pt_gp from the global list so that
1842         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1843         * can be made while we are releasing struct t10_alua_tg_pt_gp.
1844         */
1845        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1846        list_del(&tg_pt_gp->tg_pt_gp_list);
1847        dev->t10_alua.alua_tg_pt_gps_count--;
1848        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1849
1850        flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1851
1852        /*
1853         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1854         * core_alua_get_tg_pt_gp_by_name() in
1855         * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1856         * to be released with core_alua_put_tg_pt_gp_from_name().
1857         */
1858        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1859                cpu_relax();
1860
1861        /*
1862         * Release reference to struct t10_alua_tg_pt_gp from all associated
1863         * struct se_port.
1864         */
1865        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1866        list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1867                        &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1868                if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1869                        list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1870                        tg_pt_gp->tg_pt_gp_members--;
1871                        tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1872                }
1873                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1874                /*
1875                 * tg_pt_gp_mem is associated with a single
1876                 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1877                 * core_alua_free_tg_pt_gp_mem().
1878                 *
1879                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1880                 * assume we want to re-associate a given tg_pt_gp_mem with
1881                 * default_tg_pt_gp.
1882                 */
1883                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1884                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp)
1885                        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1886                                        dev->t10_alua.default_tg_pt_gp);
1887                else
1888                        tg_pt_gp_mem->tg_pt_gp = NULL;
1889                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1890
1891                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1892        }
1893        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1894
1895        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1896}
1897
1898void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1899{
1900        struct t10_alua_tg_pt_gp *tg_pt_gp;
1901        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1902
1903        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1904        if (!tg_pt_gp_mem)
1905                return;
1906
1907        while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1908                cpu_relax();
1909
1910        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1911        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1912        if (tg_pt_gp) {
1913                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1914                if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1915                        list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1916                        tg_pt_gp->tg_pt_gp_members--;
1917                        tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1918                }
1919                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1920                tg_pt_gp_mem->tg_pt_gp = NULL;
1921        }
1922        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1923
1924        kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1925}
1926
1927static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1928                struct se_device *dev, const char *name)
1929{
1930        struct t10_alua_tg_pt_gp *tg_pt_gp;
1931        struct config_item *ci;
1932
1933        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1934        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1935                        tg_pt_gp_list) {
1936                if (!tg_pt_gp->tg_pt_gp_valid_id)
1937                        continue;
1938                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1939                if (!strcmp(config_item_name(ci), name)) {
1940                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1941                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1942                        return tg_pt_gp;
1943                }
1944        }
1945        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1946
1947        return NULL;
1948}
1949
1950static void core_alua_put_tg_pt_gp_from_name(
1951        struct t10_alua_tg_pt_gp *tg_pt_gp)
1952{
1953        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1954
1955        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1956        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1957        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1958}
1959
1960/*
1961 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1962 */
1963void __core_alua_attach_tg_pt_gp_mem(
1964        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1965        struct t10_alua_tg_pt_gp *tg_pt_gp)
1966{
1967        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1968        tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1969        tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1970        list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1971                        &tg_pt_gp->tg_pt_gp_mem_list);
1972        tg_pt_gp->tg_pt_gp_members++;
1973        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1974}
1975
1976/*
1977 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1978 */
1979static void __core_alua_drop_tg_pt_gp_mem(
1980        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1981        struct t10_alua_tg_pt_gp *tg_pt_gp)
1982{
1983        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1984        list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1985        tg_pt_gp_mem->tg_pt_gp = NULL;
1986        tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1987        tg_pt_gp->tg_pt_gp_members--;
1988        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1989}
1990
1991ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1992{
1993        struct config_item *tg_pt_ci;
1994        struct t10_alua_tg_pt_gp *tg_pt_gp;
1995        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1996        ssize_t len = 0;
1997
1998        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1999        if (!tg_pt_gp_mem)
2000                return len;
2001
2002        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2003        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2004        if (tg_pt_gp) {
2005                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
2006                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
2007                        " %hu\nTG Port Primary Access State: %s\nTG Port "
2008                        "Primary Access Status: %s\nTG Port Secondary Access"
2009                        " State: %s\nTG Port Secondary Access Status: %s\n",
2010                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
2011                        core_alua_dump_state(atomic_read(
2012                                        &tg_pt_gp->tg_pt_gp_alua_access_state)),
2013                        core_alua_dump_status(
2014                                tg_pt_gp->tg_pt_gp_alua_access_status),
2015                        (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
2016                        "Offline" : "None",
2017                        core_alua_dump_status(port->sep_tg_pt_secondary_stat));
2018        }
2019        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2020
2021        return len;
2022}
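
/*
 * Example of the text emitted above for an assumed group "tg_pt_gp_1"
 * with ID 1 in the default state:
 *
 *   TG Port Alias: tg_pt_gp_1
 *   TG Port Group ID: 1
 *   TG Port Primary Access State: Active/Optimized
 *   TG Port Primary Access Status: None
 *   TG Port Secondary Access State: None
 *   TG Port Secondary Access Status: None
 */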
2023
2024ssize_t core_alua_store_tg_pt_gp_info(
2025        struct se_port *port,
2026        const char *page,
2027        size_t count)
2028{
2029        struct se_portal_group *tpg;
2030        struct se_lun *lun;
2031        struct se_device *dev = port->sep_lun->lun_se_dev;
2032        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
2033        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2034        unsigned char buf[TG_PT_GROUP_NAME_BUF];
2035        int move = 0;
2036
2037        tpg = port->sep_tpg;
2038        lun = port->sep_lun;
2039
2040        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
2041        if (!tg_pt_gp_mem)
2042                return 0;
2043
2044        if (count >= TG_PT_GROUP_NAME_BUF) {
2045                pr_err("ALUA Target Port Group alias too large!\n");
2046                return -EINVAL;
2047        }
2048        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2049        memcpy(buf, page, count);
2050        /*
2051         * Any ALUA target port group alias besides "NULL" means we will be
2052         * making a new group association.
2053         */
2054        if (strcmp(strstrip(buf), "NULL")) {
2055                /*
2056                 * core_alua_get_tg_pt_gp_by_name() will increment reference to
2057                 * struct t10_alua_tg_pt_gp.  This reference is released with
2058                 * core_alua_put_tg_pt_gp_from_name() below.
2059                 */
2060                tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
2061                                        strstrip(buf));
2062                if (!tg_pt_gp_new)
2063                        return -ENODEV;
2064        }
2065
2066        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2067        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2068        if (tg_pt_gp) {
2069                /*
2070                 * Clearing an existing tg_pt_gp association, and replacing
2071                 * with the default_tg_pt_gp.
2072                 */
2073                if (!tg_pt_gp_new) {
2074                        pr_debug("Target_Core_ConfigFS: Moving"
2075                                " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2076                                " alua/%s, ID: %hu back to"
2077                                " default_tg_pt_gp\n",
2078                                tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2079                                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2080                                config_item_name(&lun->lun_group.cg_item),
2081                                config_item_name(
2082                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
2083                                tg_pt_gp->tg_pt_gp_id);
2084
2085                        __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2086                        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
2087                                        dev->t10_alua.default_tg_pt_gp);
2088                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2089
2090                        return count;
2091                }
2092                /*
2093                 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
2094                 */
2095                __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2096                move = 1;
2097        }
2098        /*
2099         * Associate tg_pt_gp_mem with tg_pt_gp_new.
2100         */
2101        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
2102        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2103        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2104                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2105                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2106                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2107                config_item_name(&lun->lun_group.cg_item),
2108                config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2109                tg_pt_gp_new->tg_pt_gp_id);
2110
2111        core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2112        return count;
2113}
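
/*
 * Assumed userspace view of the store handler above: echoing a group
 * name into a LUN's alua_tg_pt_gp attribute moves the port into that
 * group, and the literal string "NULL" moves it back to the device's
 * default_tg_pt_gp (configfs paths abbreviated):
 *
 *   echo tg_pt_gp_1 > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *   echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 */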
2114
2115ssize_t core_alua_show_access_type(
2116        struct t10_alua_tg_pt_gp *tg_pt_gp,
2117        char *page)
2118{
2119        if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2120            (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2121                return sprintf(page, "Implicit and Explicit\n");
2122        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2123                return sprintf(page, "Implicit\n");
2124        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2125                return sprintf(page, "Explicit\n");
2126        else
2127                return sprintf(page, "None\n");
2128}
2129
2130ssize_t core_alua_store_access_type(
2131        struct t10_alua_tg_pt_gp *tg_pt_gp,
2132        const char *page,
2133        size_t count)
2134{
2135        unsigned long tmp;
2136        int ret;
2137
2138        ret = kstrtoul(page, 0, &tmp);
2139        if (ret < 0) {
2140                pr_err("Unable to extract alua_access_type\n");
2141                return ret;
2142        }
2143        if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2144                pr_err("Illegal value for alua_access_type:"
2145                                " %lu\n", tmp);
2146                return -EINVAL;
2147        }
2148        if (tmp == 3)
2149                tg_pt_gp->tg_pt_gp_alua_access_type =
2150                        TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2151        else if (tmp == 2)
2152                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2153        else if (tmp == 1)
2154                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2155        else
2156                tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2157
2158        return count;
2159}
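
/*
 * Accepted alua_access_type values, per the checks above:
 *   0 = None, 1 = Implicit, 2 = Explicit, 3 = Implicit and Explicit.
 * For example (attribute path assumed):
 *
 *   echo 3 > .../alua/tg_pt_gp_1/alua_access_type
 */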
2160
2161ssize_t core_alua_show_nonop_delay_msecs(
2162        struct t10_alua_tg_pt_gp *tg_pt_gp,
2163        char *page)
2164{
2165        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2166}
2167
2168ssize_t core_alua_store_nonop_delay_msecs(
2169        struct t10_alua_tg_pt_gp *tg_pt_gp,
2170        const char *page,
2171        size_t count)
2172{
2173        unsigned long tmp;
2174        int ret;
2175
2176        ret = kstrtoul(page, 0, &tmp);
2177        if (ret < 0) {
2178                pr_err("Unable to extract nonop_delay_msecs\n");
2179                return ret;
2180        }
2181        if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2182                pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2183                        " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2184                        ALUA_MAX_NONOP_DELAY_MSECS);
2185                return -EINVAL;
2186        }
2187        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2188
2189        return count;
2190}
2191
2192ssize_t core_alua_show_trans_delay_msecs(
2193        struct t10_alua_tg_pt_gp *tg_pt_gp,
2194        char *page)
2195{
2196        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2197}
2198
2199ssize_t core_alua_store_trans_delay_msecs(
2200        struct t10_alua_tg_pt_gp *tg_pt_gp,
2201        const char *page,
2202        size_t count)
2203{
2204        unsigned long tmp;
2205        int ret;
2206
2207        ret = kstrtoul(page, 0, &tmp);
2208        if (ret < 0) {
2209                pr_err("Unable to extract trans_delay_msecs\n");
2210                return ret;
2211        }
2212        if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2213                pr_err("Passed trans_delay_msecs: %lu, exceeds"
2214                        " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2215                        ALUA_MAX_TRANS_DELAY_MSECS);
2216                return -EINVAL;
2217        }
2218        tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2219
2220        return count;
2221}
2222
2223ssize_t core_alua_show_implicit_trans_secs(
2224        struct t10_alua_tg_pt_gp *tg_pt_gp,
2225        char *page)
2226{
2227        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2228}
2229
2230ssize_t core_alua_store_implicit_trans_secs(
2231        struct t10_alua_tg_pt_gp *tg_pt_gp,
2232        const char *page,
2233        size_t count)
2234{
2235        unsigned long tmp;
2236        int ret;
2237
2238        ret = kstrtoul(page, 0, &tmp);
2239        if (ret < 0) {
2240                pr_err("Unable to extract implicit_trans_secs\n");
2241                return ret;
2242        }
2243        if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2244                pr_err("Passed implicit_trans_secs: %lu, exceeds"
2245                        " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2246                        ALUA_MAX_IMPLICIT_TRANS_SECS);
2247                return -EINVAL;
2248        }
2249        tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2250
2251        return count;
2252}
2253
2254ssize_t core_alua_show_preferred_bit(
2255        struct t10_alua_tg_pt_gp *tg_pt_gp,
2256        char *page)
2257{
2258        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2259}
2260
2261ssize_t core_alua_store_preferred_bit(
2262        struct t10_alua_tg_pt_gp *tg_pt_gp,
2263        const char *page,
2264        size_t count)
2265{
2266        unsigned long tmp;
2267        int ret;
2268
2269        ret = kstrtoul(page, 0, &tmp);
2270        if (ret < 0) {
2271                pr_err("Unable to extract preferred ALUA value\n");
2272                return ret;
2273        }
2274        if ((tmp != 0) && (tmp != 1)) {
2275                pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2276                return -EINVAL;
2277        }
2278        tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2279
2280        return count;
2281}
2282
2283ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2284{
2285        if (!lun->lun_sep)
2286                return -ENODEV;
2287
2288        return sprintf(page, "%d\n",
2289                atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
2290}
2291
2292ssize_t core_alua_store_offline_bit(
2293        struct se_lun *lun,
2294        const char *page,
2295        size_t count)
2296{
2297        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2298        unsigned long tmp;
2299        int ret;
2300
2301        if (!lun->lun_sep)
2302                return -ENODEV;
2303
2304        ret = kstrtoul(page, 0, &tmp);
2305        if (ret < 0) {
2306                pr_err("Unable to extract alua_tg_pt_offline value\n");
2307                return ret;
2308        }
2309        if ((tmp != 0) && (tmp != 1)) {
2310                pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2311                                tmp);
2312                return -EINVAL;
2313        }
2314        tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
2315        if (!tg_pt_gp_mem) {
2316                pr_err("Unable to locate *tg_pt_gp_mem\n");
2317                return -EINVAL;
2318        }
2319
2320        ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
2321                        lun->lun_sep, 0, (int)tmp);
2322        if (ret < 0)
2323                return -EINVAL;
2324
2325        return count;
2326}
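
/*
 * Assumed userspace usage of the secondary-state attributes handled
 * here and below (paths abbreviated): taking a single target port
 * offline and asking for its metadata to be persisted looks like
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_write_md
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */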
2327
2328ssize_t core_alua_show_secondary_status(
2329        struct se_lun *lun,
2330        char *page)
2331{
2332        return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
2333}
2334
2335ssize_t core_alua_store_secondary_status(
2336        struct se_lun *lun,
2337        const char *page,
2338        size_t count)
2339{
2340        unsigned long tmp;
2341        int ret;
2342
2343        ret = kstrtoul(page, 0, &tmp);
2344        if (ret < 0) {
2345                pr_err("Unable to extract alua_tg_pt_status\n");
2346                return ret;
2347        }
2348        if ((tmp != ALUA_STATUS_NONE) &&
2349            (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2350            (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2351                pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2352                                tmp);
2353                return -EINVAL;
2354        }
2355        lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
2356
2357        return count;
2358}
2359
2360ssize_t core_alua_show_secondary_write_metadata(
2361        struct se_lun *lun,
2362        char *page)
2363{
2364        return sprintf(page, "%d\n",
2365                        lun->lun_sep->sep_tg_pt_secondary_write_md);
2366}
2367
2368ssize_t core_alua_store_secondary_write_metadata(
2369        struct se_lun *lun,
2370        const char *page,
2371        size_t count)
2372{
2373        unsigned long tmp;
2374        int ret;
2375
2376        ret = kstrtoul(page, 0, &tmp);
2377        if (ret < 0) {
2378                pr_err("Unable to extract alua_tg_pt_write_md\n");
2379                return ret;
2380        }
2381        if ((tmp != 0) && (tmp != 1)) {
2382                pr_err("Illegal value for alua_tg_pt_write_md:"
2383                                " %lu\n", tmp);
2384                return -EINVAL;
2385        }
2386        lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
2387
2388        return count;
2389}
2390
2391int core_setup_alua(struct se_device *dev)
2392{
2393        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
2394            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2395                struct t10_alua_lu_gp_member *lu_gp_mem;
2396
2397                /*
2398                 * Associate this struct se_device with the default ALUA
2399                 * LUN Group.
2400                 */
2401                lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2402                if (IS_ERR(lu_gp_mem))
2403                        return PTR_ERR(lu_gp_mem);
2404
2405                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2406                __core_alua_attach_lu_gp_mem(lu_gp_mem,
2407                                default_lu_gp);
2408                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2409
2410                pr_debug("%s: Adding to default ALUA LU Group:"
2411                        " core/alua/lu_gps/default_lu_gp\n",
2412                        dev->transport->name);
2413        }
2414
2415        return 0;
2416}
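
/*
 * Sketch of the assumed caller (device configuration in
 * target_core_device.c): core_setup_alua() is invoked once per
 * se_device; on success the device is a member of default_lu_gp:
 *
 *   rc = core_setup_alua(dev);
 *   if (rc)
 *           return rc;
 */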
2417