/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit access (ALUA)
 * emulation.
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary);
static int core_alua_set_tg_pt_secondary_state(
                struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
                struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
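/*
 * Layout note, derived from the offset math below: each LBA map is emitted
 * as one user data segment referral descriptor consisting of a 4-byte header
 * whose last byte holds the number of target port group entries, an 8-byte
 * FIRST LBA, and an 8-byte LAST LBA (20 bytes total), followed by one 4-byte
 * entry per target port group: the ASYMMETRIC ACCESS STATE, a reserved byte,
 * and the 2-byte target port group id.
 */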
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
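        /*
         * Each write below is guarded against the allocation length, but
         * off and rd_len keep advancing past cmd->data_length, so the
         * RETURN DATA LENGTH header still reports the full, untruncated
         * size of the referrals data.
         */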
        spin_lock(&dev->t10_alua.lba_map_lock);
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
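                /*
                 * desc_num was latched above as the offset of this
                 * descriptor's "number of target port groups" byte, so it
                 * can be back-patched now that pg_num is known.
                 */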
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN payload
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
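/*
 * Response layout, as built below: a 4-byte header (8 bytes with the
 * extended header) carrying the RETURN DATA LENGTH, then one 8-byte
 * descriptor per target port group followed by a 4-byte entry per member
 * target port, which matches the rd_len accounting in the loop.
 */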
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct se_lun *lun;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over the RESERVED area to the first Target port group
         * descriptor, depending on the PARAMETER DATA FORMAT type.
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor
                 * list based on tg_pt_gp_members count will fit into the
                 * response payload. Otherwise, bump rd_len to let the
                 * initiator know we have exceeded the allocation length and
                 * the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
                buf[off++] |= (atomic_read(
                        &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
                buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
                        buf[off++] = (lun->lun_rtpi & 0xff);
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN payload
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the
                 * application client to use as a base for its transition
                 * timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from
                 * the LUN this CDB was received upon to determine this value
                 * individually for the ALUA target port group.
                 */
                spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
                tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
                if (tg_pt_gp)
                        buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, GOOD);
        return 0;
}

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_lun *l_lun = cmd->se_lun;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        spin_lock(&l_lun->lun_tg_pt_gp_lock);
        l_tg_pt_gp = l_lun->lun_tg_pt_gp;
        if (!l_tg_pt_gp) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_err("Unable to access l_lun->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        spin_unlock(&l_lun->lun_tg_pt_gp_lock);

        ptr = &buf[4]; /* Skip over RESERVED area in header */
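
        /*
         * Each set target port group descriptor parsed below is 4 bytes:
         * the ASYMMETRIC ACCESS STATE in the low nibble of byte 0, a
         * reserved byte, and a 2-byte TARGET PORT GROUP or RELATIVE TARGET
         * PORT IDENTIFIER, which is why ptr and len advance by 4 per pass.
         */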
        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state,
                                                valid_states, &primary);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_lun, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        struct se_lun *lun;

                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to
                         * identify the Target Port in question for the
                         * incoming SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(lun, &dev->dev_sep_list,
                                                        lun_dev_link) {
                                if (lun->lun_rtpi != rtpi)
                                        continue;

                                // XXX: racy unlock
                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                lun, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, GOOD);
        return rc;
}

static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
{
        /*
         * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible';
         * the ALUA additional sense code qualifier (ASCQ) is determined
         * by the ALUA primary or secondary access state.
         */
        pr_debug("[%s]: ALUA TG Port not available, "
                "SenseKey: NOT_READY, ASC/ASCQ: "
                "0x04/0x%02x\n",
                cmd->se_tfo->get_fabric_name(), alua_ascq);

        cmd->scsi_asc = 0x04;
        cmd->scsi_ascq = alua_ascq;
}

static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
        int nonop_delay_msecs)
{
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
         * later to determine if processing of this cmd needs to be
         * temporarily delayed for the Active/NonOptimized primary access state.
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
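                                /*
                                 * do_div() divides tmp in place and returns
                                 * the remainder, i.e. this LBA's offset
                                 * within the repeating window of
                                 * segment_size * segment_mult blocks.
                                 */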
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id !=
                            tg_pt_gp->tg_pt_gp_id)
                                continue;
                        switch (map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                                return 1;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}

static inline int core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
         * spc4r17 section 5.9.2.4.4
         */
        switch (cdb[0]) {
        case INQUIRY:
        case LOG_SELECT:
        case LOG_SENSE:
        case MODE_SELECT:
        case MODE_SENSE:
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
        case READ_CAPACITY:
                return 0;
        case SERVICE_ACTION_IN_16:
                switch (cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
         * spc4r17 section 5.9.2.4.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                return 1;
        }

        return 0;
}

static inline int core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
         * spc4r17 section 5.9.2.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                        return 1;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                return 1;
        }

        return 0;
}

/*
 * Returns TCM_NO_SENSE (0) on success; TCM_CHECK_CONDITION_NOT_READY when
 * the LUN is not accessible in the current ALUA access state; and
 * TCM_INVALID_CDB_FIELD for an unknown ALUA access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;

        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;

        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
                return TCM_CHECK_CONDITION_NOT_READY;
        }

        if (!lun->lun_tg_pt_gp)
                return 0;

        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
        out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;

        // XXX: keeps using tg_pt_gp without reference after unlock
        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP.
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (core_alua_state_standby(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (core_alua_state_unavailable(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (core_alua_state_transition(cmd, cdb))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
                        return TCM_CHECK_CONDITION_NOT_READY;
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state that is
         * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                return TCM_INVALID_CDB_FIELD;
        }

        return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary)
{
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
         * defined as primary target port asymmetric access states.
         */
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                if (!(valid & ALUA_AO_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                if (!(valid & ALUA_AN_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (!(valid & ALUA_S_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (!(valid & ALUA_U_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (!(valid & ALUA_LBD_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_OFFLINE:
                /*
                 * OFFLINE state is defined as a secondary target port
                 * asymmetric access state.
                 */
                if (!(valid & ALUA_O_SUP))
                        goto not_supported;
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                /*
                 * Transitioning is set internally, and
                 * cannot be selected manually.
                 */
                goto not_supported;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
        }

        return 0;

not_supported:
        pr_err("ALUA access state %s not supported\n",
               core_alua_dump_state(state));
        return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                return "Active/Optimized";
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                return "Active/NonOptimized";
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                return "LBA Dependent";
        case ALUA_ACCESS_STATE_STANDBY:
                return "Standby";
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                return "Unavailable";
        case ALUA_ACCESS_STATE_OFFLINE:
                return "Offline";
        case ALUA_ACCESS_STATE_TRANSITION:
                return "Transitioning";
        default:
                return "Unknown";
        }

        return NULL;
}

char *core_alua_dump_status(int status)
{
        switch (status) {
        case ALUA_STATUS_NONE:
                return "None";
        case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
                return "Altered by Explicit STPG";
        case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
                return "Altered by Implicit ALUA";
        default:
                return "Unknown";
        }

        return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
        struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
                return 0;
        if (in_interrupt())
                return 0;
        /*
         * The ALUA Active/NonOptimized access state delay can be disabled
         * via configfs with a value of zero
         */
        if (!cmd->alua_nonop_delay)
                return 0;
        /*
         * struct se_cmd->alua_nonop_delay gets set by a target port group
         * defined interval in core_alua_state_nonoptimized()
         */
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
        u32 md_buf_len)
{
        struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        int ret;

        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
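        /*
         * Write from offset 0; O_TRUNC above already emptied the file, so
         * each update replaces the metadata file wholesale.
         */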
        ret = kernel_write(file, md_buf, md_buf_len, 0);
        if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
        fput(file);
        return (ret < 0) ? -EIO : 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char path[ALUA_METADATA_PATH_LEN];
        int len, rc;

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);

        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
                        tg_pt_gp->tg_pt_gp_alua_pending_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);

        snprintf(path, ALUA_METADATA_PATH_LEN,
                "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
                config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
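        /*
         * With the default db_root of "/var/target", this resolves to e.g.
         * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp config name>; the
         * concrete path depends on how db_root was configured.
         */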

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(md_buf);
        return rc;
}

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_dev_entry *se_deve;
        struct se_lun *lun;
        struct se_lun_acl *lacl;

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition for the initiator port associated with every I_T
                 * nexus with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED.
                 *
                 * After an explicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED for the initiator port associated with
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command was received.
                 */
                if (!percpu_ref_tryget_live(&lun->lun_ref))
                        continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock(&lun->lun_deve_lock);
                list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
                        lacl = rcu_dereference_check(se_deve->se_lun_acl,
                                        lockdep_is_held(&lun->lun_deve_lock));

                        /*
                         * spc4r37 p.242:
                         * After an explicit target port asymmetric access
                         * state change, a device server shall establish a
                         * unit attention condition with the additional sense
                         * code set to ASYMMETRIC ACCESS STATE CHANGED for
                         * the initiator port associated with every I_T nexus
                         * other than the I_T nexus on which the SET TARGET
                         * PORT GROUPS command was received.
                         */
                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
                            (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                                continue;

                        /*
                         * se_deve->se_lun_acl pointer may be NULL for an
                         * entry created without explicit Node+MappedLUN ACLs
                         */
                        if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
                                continue;

                        core_scsi3_ua_allocate(se_deve, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
                spin_unlock(&lun->lun_deve_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
                struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);

        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
         * to struct file.
         *
         * Note that there is the case where we do not want to update the
         * metadata when the saved metadata is being parsed in userspace
         * when setting the existing port access state and access status.
         *
         * Also note that the failure to write out the ALUA metadata to
         * struct file does NOT affect the actual ALUA transition.
         */
        if (tg_pt_gp->tg_pt_gp_write_metadata) {
                mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
                core_alua_update_tpg_primary_metadata(tg_pt_gp);
                mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
        }
        /*
         * Set the current primary ALUA access state to the requested new state
         */
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                   tg_pt_gp->tg_pt_gp_alua_pending_state);

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " from primary access state %s to %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
                core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));

        core_alua_queue_state_change_ua(tg_pt_gp);

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (tg_pt_gp->tg_pt_gp_transition_complete)
                complete(tg_pt_gp->tg_pt_gp_transition_complete);
}

static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
{
        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        DECLARE_COMPLETION_ONSTACK(wait);

        /* Nothing to be done here */
        if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
                return 0;

        if (new_state == ALUA_ACCESS_STATE_TRANSITION)
                return -EAGAIN;

        /*
         * Flush any pending transitions
         */
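        /*
         * If an implicit transition is already queued, reuse it: update the
         * pending state, arm the on-stack completion, flush the delayed
         * work so it runs now, and wait for it to finish rather than
         * starting a second transition.
         */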
        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
            atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
            ALUA_ACCESS_STATE_TRANSITION) {
                /* Just in case */
                tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
                return 0;
        }

        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
        tg_pt_gp->tg_pt_gp_alua_previous_state =
                atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
        tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                        ALUA_ACCESS_STATE_TRANSITION);
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        core_alua_queue_state_change_ua(tg_pt_gp);

        /*
         * Check for the optional ALUA primary state transition delay
         */
        if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

        /*
         * Take a reference for workqueue item
         */
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

        if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
                unsigned long transition_tmo;

                transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work,
                                   transition_tmo);
        } else {
                tg_pt_gp->tg_pt_gp_transition_complete = &wait;
                queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
                                   &tg_pt_gp->tg_pt_gp_transition_work, 0);
                wait_for_completion(&wait);
                tg_pt_gp->tg_pt_gp_transition_complete = NULL;
        }
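        /*
         * Implicit transitions with a configured implicit_trans_secs thus
         * complete asynchronously after the delay, while explicit (and
         * undelayed implicit) transitions queue the work immediately and
         * block on the completion until the new state has been applied.
         */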

        return 0;
}

int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
        struct se_lun *l_lun,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
{
        struct se_device *dev;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;

        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
                return -EINVAL;

        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
         * we only do transition on the passed *l_tg_pt_gp, and not
         * on all of the matching target port group IDs in default_lu_gp.
         */
        if (!lu_gp->lu_gp_id) {
                /*
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
                l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
                return rc;
        }
        /*
         * For all other LU groups aside from 'default_lu_gp', walk all of
         * the associated storage objects looking for a matching target port
         * group ID from the local target port group.
         */
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
                                lu_gp_mem_list) {

                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
                spin_unlock(&lu_gp->lu_gp_lock);

                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
                                &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {

                        if (!tg_pt_gp->tg_pt_gp_valid_id)
                                continue;
                        /*
                         * If the target port asymmetric access state is
                         * changed for any target port group accessible via
                         * a logical unit within a LU group, the target port
                         * group asymmetric access states for the same target
                         * port group accessible via other logical units in
                         * that LU group will also change.
                         */
                        if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
                                continue;

                        if (l_tg_pt_gp == tg_pt_gp) {
                                tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
                                tg_pt_gp->tg_pt_gp_alua_lun = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
                         */
                        rc = core_alua_do_transition_tg_pt(tg_pt_gp,
                                        new_state, explicit);

                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        if (rc)
                                break;
                }
                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
        }
        spin_unlock(&lu_gp->lu_gp_lock);

        if (!rc) {
                pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
                         " Group IDs: %hu %s transition to primary state: %s\n",
                         config_item_name(&lu_gp->lu_gp_group.cg_item),
                         l_tg_pt_gp->tg_pt_gp_id,
                         (explicit) ? "explicit" : "implicit",
                         core_alua_dump_state(new_state));
        }

        atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
        return rc;
}

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
        char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
        int len, rc;

        mutex_lock(&lun->lun_tg_pt_md_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                rc = -ENOMEM;
                goto out_unlock;
        }

        memset(path, 0, ALUA_METADATA_PATH_LEN);
        memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

        len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
                        se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

        if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
                snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
                                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);

        snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
                        db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
                        lun->unpacked_lun);
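        /*
         * With the default db_root of "/var/target", this resolves to e.g.
         * /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<n>; the exact path
         * depends on the configured db_root and the fabric's naming.
         */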
1284
1285        rc = core_alua_write_tpg_metadata(path, md_buf, len);
1286        kfree(md_buf);
1287
1288out_unlock:
1289        mutex_unlock(&lun->lun_tg_pt_md_mutex);
1290        return rc;
1291}
1292
1293static int core_alua_set_tg_pt_secondary_state(
1294        struct se_lun *lun,
1295        int explicit,
1296        int offline)
1297{
1298        struct t10_alua_tg_pt_gp *tg_pt_gp;
1299        int trans_delay_msecs;
1300
1301        spin_lock(&lun->lun_tg_pt_gp_lock);
1302        tg_pt_gp = lun->lun_tg_pt_gp;
1303        if (!tg_pt_gp) {
1304                spin_unlock(&lun->lun_tg_pt_gp_lock);
1305                pr_err("Unable to complete secondary state"
1306                                " transition\n");
1307                return -EINVAL;
1308        }
1309        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1310        /*
1311         * Set the secondary ALUA target port access state to OFFLINE
1312         * or release the previously secondary state for struct se_lun
1313         */
1314        if (offline)
1315                atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1316        else
1317                atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1318
1319        lun->lun_tg_pt_secondary_stat = (explicit) ?
1320                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1321                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1322
1323        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1324                " to secondary access state: %s\n", (explicit) ? "explicit" :
1325                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1326                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1327
1328        spin_unlock(&lun->lun_tg_pt_gp_lock);
1329        /*
1330         * Do the optional transition delay after we set the secondary
1331         * ALUA access state.
1332         */
1333        if (trans_delay_msecs != 0)
1334                msleep_interruptible(trans_delay_msecs);
1335        /*
1336         * See if we need to update the ALUA fabric port metadata for
1337         * secondary state and status
1338         */
1339        if (lun->lun_tg_pt_secondary_write_md)
1340                core_alua_update_tpg_secondary_metadata(lun);
1341
1342        return 0;
1343}
1344
1345struct t10_alua_lba_map *
1346core_alua_allocate_lba_map(struct list_head *list,
1347                           u64 first_lba, u64 last_lba)
1348{
1349        struct t10_alua_lba_map *lba_map;
1350
1351        lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1352        if (!lba_map) {
1353                pr_err("Unable to allocate struct t10_alua_lba_map\n");
1354                return ERR_PTR(-ENOMEM);
1355        }
1356        INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1357        lba_map->lba_map_first_lba = first_lba;
1358        lba_map->lba_map_last_lba = last_lba;
1359
1360        list_add_tail(&lba_map->lba_map_list, list);
1361        return lba_map;
1362}
1363
1364int
1365core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1366                               int pg_id, int state)
1367{
1368        struct t10_alua_lba_map_member *lba_map_mem;
1369
1370        list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1371                            lba_map_mem_list) {
1372                if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1373                        pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1374                        return -EINVAL;
1375                }
1376        }
1377
1378        lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1379        if (!lba_map_mem) {
1380                pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1381                return -ENOMEM;
1382        }
1383        lba_map_mem->lba_map_mem_alua_state = state;
1384        lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1385
1386        list_add_tail(&lba_map_mem->lba_map_mem_list,
1387                      &lba_map->lba_map_mem_list);
1388        return 0;
1389}
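
/*
 * Illustrative sketch (not compiled): building one referral segment with
 * the two helpers above. The LBA range, group IDs and states are made-up
 * values; on failure the partially built list is torn down with
 * core_alua_free_lba_map() below.
 */
#if 0
static int example_build_referral_segment(struct list_head *lba_list)
{
	struct t10_alua_lba_map *map;
	int ret;

	map = core_alua_allocate_lba_map(lba_list, 0, 0xffff);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Group 1 is active/optimized for this LBA range... */
	ret = core_alua_allocate_lba_map_mem(map, 1,
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	if (!ret)
		/* ...while group 2 is active/non-optimized. */
		ret = core_alua_allocate_lba_map_mem(map, 2,
				ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED);
	if (ret)
		core_alua_free_lba_map(lba_list);
	return ret;
}
#endif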
1390
1391void
1392core_alua_free_lba_map(struct list_head *lba_list)
1393{
1394        struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1395        struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1396
1397        list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1398                                 lba_map_list) {
1399                list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1400                                         &lba_map->lba_map_mem_list,
1401                                         lba_map_mem_list) {
1402                        list_del(&lba_map_mem->lba_map_mem_list);
1403                        kmem_cache_free(t10_alua_lba_map_mem_cache,
1404                                        lba_map_mem);
1405                }
1406                list_del(&lba_map->lba_map_list);
1407                kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1408        }
1409}
1410
1411void
1412core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1413                      int segment_size, int segment_mult)
1414{
1415        struct list_head old_lba_map_list;
1416        struct t10_alua_tg_pt_gp *tg_pt_gp;
1417        int activate = 0, supported;
1418
1419        INIT_LIST_HEAD(&old_lba_map_list);
1420        spin_lock(&dev->t10_alua.lba_map_lock);
1421        dev->t10_alua.lba_map_segment_size = segment_size;
1422        dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1423        list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1424        if (lba_map_list) {
1425                list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1426                activate = 1;
1427        }
1428        spin_unlock(&dev->t10_alua.lba_map_lock);
1429        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1430        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1431                            tg_pt_gp_list) {
1433                if (!tg_pt_gp->tg_pt_gp_valid_id)
1434                        continue;
1435                supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1436                if (activate)
1437                        supported |= ALUA_LBD_SUP;
1438                else
1439                        supported &= ~ALUA_LBD_SUP;
1440                tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1441        }
1442        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1443        core_alua_free_lba_map(&old_lba_map_list);
1444}
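
/*
 * Usage sketch (not compiled, fragment only): activating a list built as
 * above and later clearing it again. Passing a NULL list deactivates
 * referrals, which also clears ALUA_LBD_SUP in every tg_pt_gp with a
 * valid ID; the segment size/multiplier values are illustrative.
 */
#if 0
	core_alua_set_lba_map(dev, &lba_list, 512, 2);	/* activate */
	core_alua_set_lba_map(dev, NULL, 0, 0);		/* deactivate */
#endif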
1445
1446struct t10_alua_lu_gp *
1447core_alua_allocate_lu_gp(const char *name, int def_group)
1448{
1449        struct t10_alua_lu_gp *lu_gp;
1450
1451        lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1452        if (!lu_gp) {
1453                pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1454                return ERR_PTR(-ENOMEM);
1455        }
1456        INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1457        INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1458        spin_lock_init(&lu_gp->lu_gp_lock);
1459        atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1460
1461        if (def_group) {
1462                lu_gp->lu_gp_id = alua_lu_gps_counter++;
1463                lu_gp->lu_gp_valid_id = 1;
1464                alua_lu_gps_count++;
1465        }
1466
1467        return lu_gp;
1468}
1469
1470int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1471{
1472        struct t10_alua_lu_gp *lu_gp_tmp;
1473        u16 lu_gp_id_tmp;
1474        /*
1475         * The lu_gp->lu_gp_id may only be set once.
1476         */
1477        if (lu_gp->lu_gp_valid_id) {
1478                pr_warn("ALUA LU Group already has a valid ID,"
1479                        " ignoring request\n");
1480                return -EINVAL;
1481        }
1482
1483        spin_lock(&lu_gps_lock);
1484        if (alua_lu_gps_count == 0x0000ffff) {
1485                pr_err("Maximum ALUA alua_lu_gps_count:"
1486                                " 0x0000ffff reached\n");
1487                spin_unlock(&lu_gps_lock);
1488                kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1489                return -ENOSPC;
1490        }
1491again:
1492        lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1493                                alua_lu_gps_counter++;
1494
1495        list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1496                if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1497                        if (!lu_gp_id)
1498                                goto again;
1499
1500                        pr_warn("ALUA Logical Unit Group ID: %hu"
1501                                " already exists, ignoring request\n",
1502                                lu_gp_id);
1503                        spin_unlock(&lu_gps_lock);
1504                        return -EINVAL;
1505                }
1506        }
1507
1508        lu_gp->lu_gp_id = lu_gp_id_tmp;
1509        lu_gp->lu_gp_valid_id = 1;
1510        list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1511        alua_lu_gps_count++;
1512        spin_unlock(&lu_gps_lock);
1513
1514        return 0;
1515}
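
/*
 * Sketch (not compiled) of the usual two-step creation sequence driven
 * from configfs: allocate a group without an ID, then either request a
 * specific ID or pass 0 to have one generated from alua_lu_gps_counter.
 * Note that core_alua_set_lu_gp_id() frees lu_gp itself on -ENOSPC.
 */
#if 0
static struct t10_alua_lu_gp *example_make_lu_gp(void)
{
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	lu_gp = core_alua_allocate_lu_gp("example_lu_gp", 0);
	if (IS_ERR(lu_gp))
		return lu_gp;

	ret = core_alua_set_lu_gp_id(lu_gp, 0);		/* 0 == auto-generate */
	if (ret < 0)
		return ERR_PTR(ret);

	return lu_gp;
}
#endif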
1516
1517static struct t10_alua_lu_gp_member *
1518core_alua_allocate_lu_gp_mem(struct se_device *dev)
1519{
1520        struct t10_alua_lu_gp_member *lu_gp_mem;
1521
1522        lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1523        if (!lu_gp_mem) {
1524                pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1525                return ERR_PTR(-ENOMEM);
1526        }
1527        INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1528        spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1529        atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1530
1531        lu_gp_mem->lu_gp_mem_dev = dev;
1532        dev->dev_alua_lu_gp_mem = lu_gp_mem;
1533
1534        return lu_gp_mem;
1535}
1536
1537void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1538{
1539        struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1540        /*
1541         * Once we have reached this point, config_item_put() has
1542         * already been called from target_core_alua_drop_lu_gp().
1543         *
1544         * Here, we remove the *lu_gp from the global list so that
1545         * no associations can be made while we are releasing
1546         * struct t10_alua_lu_gp.
1547         */
1548        spin_lock(&lu_gps_lock);
1549        list_del(&lu_gp->lu_gp_node);
1550        alua_lu_gps_count--;
1551        spin_unlock(&lu_gps_lock);
1552        /*
1553         * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1554         * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1555         * released with core_alua_put_lu_gp_from_name()
1556         */
1557        while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1558                cpu_relax();
1559        /*
1560         * Release reference to struct t10_alua_lu_gp * from all associated
1561         * struct se_device.
1562         */
1563        spin_lock(&lu_gp->lu_gp_lock);
1564        list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1565                                &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1566                if (lu_gp_mem->lu_gp_assoc) {
1567                        list_del(&lu_gp_mem->lu_gp_mem_list);
1568                        lu_gp->lu_gp_members--;
1569                        lu_gp_mem->lu_gp_assoc = 0;
1570                }
1571                spin_unlock(&lu_gp->lu_gp_lock);
1572                /*
1574                 * lu_gp_mem is associated with a single
1575                 * struct se_device->dev_alua_lu_gp_mem, and is released when
1576                 * struct se_device is released via core_alua_free_lu_gp_mem().
1577                 *
1578                 * If the passed lu_gp does NOT match the default_lu_gp, assume
1579                 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1580                 */
1581                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1582                if (lu_gp != default_lu_gp)
1583                        __core_alua_attach_lu_gp_mem(lu_gp_mem,
1584                                        default_lu_gp);
1585                else
1586                        lu_gp_mem->lu_gp = NULL;
1587                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1588
1589                spin_lock(&lu_gp->lu_gp_lock);
1590        }
1591        spin_unlock(&lu_gp->lu_gp_lock);
1592
1593        kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1594}
1595
1596void core_alua_free_lu_gp_mem(struct se_device *dev)
1597{
1598        struct t10_alua_lu_gp *lu_gp;
1599        struct t10_alua_lu_gp_member *lu_gp_mem;
1600
1601        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1602        if (!lu_gp_mem)
1603                return;
1604
1605        while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1606                cpu_relax();
1607
1608        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1609        lu_gp = lu_gp_mem->lu_gp;
1610        if (lu_gp) {
1611                spin_lock(&lu_gp->lu_gp_lock);
1612                if (lu_gp_mem->lu_gp_assoc) {
1613                        list_del(&lu_gp_mem->lu_gp_mem_list);
1614                        lu_gp->lu_gp_members--;
1615                        lu_gp_mem->lu_gp_assoc = 0;
1616                }
1617                spin_unlock(&lu_gp->lu_gp_lock);
1618                lu_gp_mem->lu_gp = NULL;
1619        }
1620        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1621
1622        kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1623}
1624
1625struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1626{
1627        struct t10_alua_lu_gp *lu_gp;
1628        struct config_item *ci;
1629
1630        spin_lock(&lu_gps_lock);
1631        list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1632                if (!lu_gp->lu_gp_valid_id)
1633                        continue;
1634                ci = &lu_gp->lu_gp_group.cg_item;
1635                if (!strcmp(config_item_name(ci), name)) {
1636                        atomic_inc(&lu_gp->lu_gp_ref_cnt);
1637                        spin_unlock(&lu_gps_lock);
1638                        return lu_gp;
1639                }
1640        }
1641        spin_unlock(&lu_gps_lock);
1642
1643        return NULL;
1644}
1645
1646void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1647{
1648        spin_lock(&lu_gps_lock);
1649        atomic_dec(&lu_gp->lu_gp_ref_cnt);
1650        spin_unlock(&lu_gps_lock);
1651}
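
/*
 * The two functions above form a get/put pair; a sketch (not compiled) of
 * the lookup pattern used by the configfs store path. The tg_pt_gp
 * variants further down follow the same scheme, just per struct se_device.
 */
#if 0
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_get_lu_gp_by_name("example_lu_gp");
	if (lu_gp) {
		/* ...use lu_gp while the reference is held... */
		core_alua_put_lu_gp_from_name(lu_gp);
	}
#endif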
1652
1653/*
1654 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1655 */
1656void __core_alua_attach_lu_gp_mem(
1657        struct t10_alua_lu_gp_member *lu_gp_mem,
1658        struct t10_alua_lu_gp *lu_gp)
1659{
1660        spin_lock(&lu_gp->lu_gp_lock);
1661        lu_gp_mem->lu_gp = lu_gp;
1662        lu_gp_mem->lu_gp_assoc = 1;
1663        list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1664        lu_gp->lu_gp_members++;
1665        spin_unlock(&lu_gp->lu_gp_lock);
1666}
1667
1668/*
1669 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1670 */
1671void __core_alua_drop_lu_gp_mem(
1672        struct t10_alua_lu_gp_member *lu_gp_mem,
1673        struct t10_alua_lu_gp *lu_gp)
1674{
1675        spin_lock(&lu_gp->lu_gp_lock);
1676        list_del(&lu_gp_mem->lu_gp_mem_list);
1677        lu_gp_mem->lu_gp = NULL;
1678        lu_gp_mem->lu_gp_assoc = 0;
1679        lu_gp->lu_gp_members--;
1680        spin_unlock(&lu_gp->lu_gp_lock);
1681}
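
/*
 * Both helpers above require the caller to hold lu_gp_mem->lu_gp_mem_lock,
 * exactly as core_setup_alua() does at the bottom of this file. A minimal
 * sketch (not compiled) of moving a member between two groups; old_lu_gp
 * and new_lu_gp are assumed to be held by the caller:
 */
#if 0
	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	__core_alua_drop_lu_gp_mem(lu_gp_mem, old_lu_gp);
	__core_alua_attach_lu_gp_mem(lu_gp_mem, new_lu_gp);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
#endif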
1682
1683struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1684                const char *name, int def_group)
1685{
1686        struct t10_alua_tg_pt_gp *tg_pt_gp;
1687
1688        tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1689        if (!tg_pt_gp) {
1690                pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1691                return NULL;
1692        }
1693        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1694        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1695        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1696        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1697        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1698        INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1699                          core_alua_do_transition_tg_pt_work);
1700        tg_pt_gp->tg_pt_gp_dev = dev;
1701        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1702                ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1703        /*
1704         * Enable both explicit and implicit ALUA support by default
1705         */
1706        tg_pt_gp->tg_pt_gp_alua_access_type =
1707                        TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1708        /*
1709         * Set the default Active/NonOptimized Delay in milliseconds
1710         */
1711        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1712        tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1713        tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1714
1715        /*
1716         * Enable all supported states
1717         */
1718        tg_pt_gp->tg_pt_gp_alua_supported_states =
1719            ALUA_T_SUP | ALUA_O_SUP |
1720            ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1721
1722        if (def_group) {
1723                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1724                tg_pt_gp->tg_pt_gp_id =
1725                                dev->t10_alua.alua_tg_pt_gps_counter++;
1726                tg_pt_gp->tg_pt_gp_valid_id = 1;
1727                dev->t10_alua.alua_tg_pt_gps_count++;
1728                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1729                              &dev->t10_alua.tg_pt_gps_list);
1730                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1731        }
1732
1733        return tg_pt_gp;
1734}
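
/*
 * Sketch (not compiled): a non-default group is allocated without an ID
 * and only joins the device list once core_alua_set_tg_pt_gp_id() below
 * succeeds. Note the allocator returns NULL rather than an ERR_PTR on
 * failure, the name argument is not used by the allocator itself, and the
 * ID helper frees tg_pt_gp on -ENOSPC only; values here are illustrative.
 */
#if 0
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "example_tg_pt_gp", 0);
	if (!tg_pt_gp)
		return -ENOMEM;
	if (core_alua_set_tg_pt_gp_id(tg_pt_gp, 16) < 0)
		return -EINVAL;
#endif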
1735
1736int core_alua_set_tg_pt_gp_id(
1737        struct t10_alua_tg_pt_gp *tg_pt_gp,
1738        u16 tg_pt_gp_id)
1739{
1740        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1741        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1742        u16 tg_pt_gp_id_tmp;
1743
1744        /*
1745         * The tg_pt_gp->tg_pt_gp_id may only be set once.
1746         */
1747        if (tg_pt_gp->tg_pt_gp_valid_id) {
1748                pr_warn("ALUA TG PT Group already has a valid ID,"
1749                        " ignoring request\n");
1750                return -EINVAL;
1751        }
1752
1753        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1754        if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1755                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1756                        " 0x0000ffff reached\n");
1757                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1758                kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1759                return -ENOSPC;
1760        }
1761again:
1762        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1763                        dev->t10_alua.alua_tg_pt_gps_counter++;
1764
1765        list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1766                        tg_pt_gp_list) {
1767                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1768                        if (!tg_pt_gp_id)
1769                                goto again;
1770
1771                        pr_err("ALUA Target Port Group ID: %hu already"
1772                                " exists, ignoring request\n", tg_pt_gp_id);
1773                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1774                        return -EINVAL;
1775                }
1776        }
1777
1778        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1779        tg_pt_gp->tg_pt_gp_valid_id = 1;
1780        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1781                        &dev->t10_alua.tg_pt_gps_list);
1782        dev->t10_alua.alua_tg_pt_gps_count++;
1783        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1784
1785        return 0;
1786}
1787
1788void core_alua_free_tg_pt_gp(
1789        struct t10_alua_tg_pt_gp *tg_pt_gp)
1790{
1791        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1792        struct se_lun *lun, *next;
1793
1794        /*
1795         * Once we have reached this point, config_item_put() has already
1796         * been called from target_core_alua_drop_tg_pt_gp().
1797         *
1798         * Here we remove *tg_pt_gp from the global list so that
1799         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1800         * can be made while we are releasing struct t10_alua_tg_pt_gp.
1801         */
1802        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1803        list_del(&tg_pt_gp->tg_pt_gp_list);
1804        dev->t10_alua.alua_tg_pt_gps_counter--;
1805        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1806
1807        flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1808
1809        /*
1810         * Allow a struct t10_alua_tg_pt_gp * referenced by
1811         * core_alua_get_tg_pt_gp_by_name() in
1812         * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1813         * to be released with core_alua_put_tg_pt_gp_from_name().
1814         */
1815        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1816                cpu_relax();
1817
1818        /*
1819         * Release reference to struct t10_alua_tg_pt_gp from all associated
1820         * struct se_lun.
1821         */
1822        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1823        list_for_each_entry_safe(lun, next,
1824                        &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1825                list_del_init(&lun->lun_tg_pt_gp_link);
1826                tg_pt_gp->tg_pt_gp_members--;
1827
1828                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1829                /*
1830                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1831                 * assume we want to re-associate a given tg_pt_gp_mem with
1832                 * default_tg_pt_gp.
1833                 */
1834                spin_lock(&lun->lun_tg_pt_gp_lock);
1835                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1836                        __target_attach_tg_pt_gp(lun,
1837                                        dev->t10_alua.default_tg_pt_gp);
1838                } else
1839                        lun->lun_tg_pt_gp = NULL;
1840                spin_unlock(&lun->lun_tg_pt_gp_lock);
1841
1842                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1843        }
1844        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1845
1846        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1847}
1848
1849static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1850                struct se_device *dev, const char *name)
1851{
1852        struct t10_alua_tg_pt_gp *tg_pt_gp;
1853        struct config_item *ci;
1854
1855        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1856        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1857                        tg_pt_gp_list) {
1858                if (!tg_pt_gp->tg_pt_gp_valid_id)
1859                        continue;
1860                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1861                if (!strcmp(config_item_name(ci), name)) {
1862                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1863                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1864                        return tg_pt_gp;
1865                }
1866        }
1867        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1868
1869        return NULL;
1870}
1871
1872static void core_alua_put_tg_pt_gp_from_name(
1873        struct t10_alua_tg_pt_gp *tg_pt_gp)
1874{
1875        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1876
1877        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1878        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1879        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1880}
1881
1882static void __target_attach_tg_pt_gp(struct se_lun *lun,
1883                struct t10_alua_tg_pt_gp *tg_pt_gp)
1884{
1885        struct se_dev_entry *se_deve;
1886
1887        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1888
1889        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1890        lun->lun_tg_pt_gp = tg_pt_gp;
1891        list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1892        tg_pt_gp->tg_pt_gp_members++;
1893        spin_lock(&lun->lun_deve_lock);
1894        list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1895                core_scsi3_ua_allocate(se_deve, 0x3f,
1896                                       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1897        spin_unlock(&lun->lun_deve_lock);
1898        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1899}
1900
1901void target_attach_tg_pt_gp(struct se_lun *lun,
1902                struct t10_alua_tg_pt_gp *tg_pt_gp)
1903{
1904        spin_lock(&lun->lun_tg_pt_gp_lock);
1905        __target_attach_tg_pt_gp(lun, tg_pt_gp);
1906        spin_unlock(&lun->lun_tg_pt_gp_lock);
1907}
1908
1909static void __target_detach_tg_pt_gp(struct se_lun *lun,
1910                struct t10_alua_tg_pt_gp *tg_pt_gp)
1911{
1912        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1913
1914        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1915        list_del_init(&lun->lun_tg_pt_gp_link);
1916        tg_pt_gp->tg_pt_gp_members--;
1917        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1918
1919        lun->lun_tg_pt_gp = NULL;
1920}
1921
1922void target_detach_tg_pt_gp(struct se_lun *lun)
1923{
1924        struct t10_alua_tg_pt_gp *tg_pt_gp;
1925
1926        spin_lock(&lun->lun_tg_pt_gp_lock);
1927        tg_pt_gp = lun->lun_tg_pt_gp;
1928        if (tg_pt_gp)
1929                __target_detach_tg_pt_gp(lun, tg_pt_gp);
1930        spin_unlock(&lun->lun_tg_pt_gp_lock);
1931}
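
/*
 * Callers that already hold lun->lun_tg_pt_gp_lock use the __ variants
 * directly; the detach-then-attach move in core_alua_store_tg_pt_gp_info()
 * below is the canonical example. Sketch (not compiled); new_tg_pt_gp is
 * assumed to be referenced by the caller:
 */
#if 0
	spin_lock(&lun->lun_tg_pt_gp_lock);
	if (lun->lun_tg_pt_gp)
		__target_detach_tg_pt_gp(lun, lun->lun_tg_pt_gp);
	__target_attach_tg_pt_gp(lun, new_tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
#endif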
1932
1933ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1934{
1935        struct config_item *tg_pt_ci;
1936        struct t10_alua_tg_pt_gp *tg_pt_gp;
1937        ssize_t len = 0;
1938
1939        spin_lock(&lun->lun_tg_pt_gp_lock);
1940        tg_pt_gp = lun->lun_tg_pt_gp;
1941        if (tg_pt_gp) {
1942                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1943                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1944                        " %hu\nTG Port Primary Access State: %s\nTG Port "
1945                        "Primary Access Status: %s\nTG Port Secondary Access"
1946                        " State: %s\nTG Port Secondary Access Status: %s\n",
1947                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1948                        core_alua_dump_state(atomic_read(
1949                                        &tg_pt_gp->tg_pt_gp_alua_access_state)),
1950                        core_alua_dump_status(
1951                                tg_pt_gp->tg_pt_gp_alua_access_status),
1952                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1953                        "Offline" : "None",
1954                        core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1955        }
1956        spin_unlock(&lun->lun_tg_pt_gp_lock);
1957
1958        return len;
1959}
1960
1961ssize_t core_alua_store_tg_pt_gp_info(
1962        struct se_lun *lun,
1963        const char *page,
1964        size_t count)
1965{
1966        struct se_portal_group *tpg = lun->lun_tpg;
1967        /*
1968         * rcu_dereference_raw protected by se_lun->lun_group symlink
1969         * reference to se_device->dev_group.
1970         */
1971        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1972        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1973        unsigned char buf[TG_PT_GROUP_NAME_BUF];
1974        int move = 0;
1975
1976        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
1977            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1978                return -ENODEV;
1979
1980        if (count >= TG_PT_GROUP_NAME_BUF) {
1981                pr_err("ALUA Target Port Group alias too large!\n");
1982                return -EINVAL;
1983        }
1984        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1985        memcpy(buf, page, count);
1986        /*
1987         * Any ALUA target port group alias besides "NULL" means we will be
1988         * making a new group association.
1989         */
1990        if (strcmp(strstrip(buf), "NULL")) {
1991                /*
1992                 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1993                 * struct t10_alua_tg_pt_gp.  This reference is released with
1994                 * core_alua_put_tg_pt_gp_from_name() below.
1995                 */
1996                tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1997                                        strstrip(buf));
1998                if (!tg_pt_gp_new)
1999                        return -ENODEV;
2000        }
2001
2002        spin_lock(&lun->lun_tg_pt_gp_lock);
2003        tg_pt_gp = lun->lun_tg_pt_gp;
2004        if (tg_pt_gp) {
2005                /*
2006                 * Clear the existing tg_pt_gp association and replace
2007                 * it with the default_tg_pt_gp.
2008                 */
2009                if (!tg_pt_gp_new) {
2010                        pr_debug("Target_Core_ConfigFS: Moving"
2011                                " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2012                                " alua/%s, ID: %hu back to"
2013                                " default_tg_pt_gp\n",
2014                                tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2015                                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2016                                config_item_name(&lun->lun_group.cg_item),
2017                                config_item_name(
2018                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
2019                                tg_pt_gp->tg_pt_gp_id);
2020
2021                        __target_detach_tg_pt_gp(lun, tg_pt_gp);
2022                        __target_attach_tg_pt_gp(lun,
2023                                        dev->t10_alua.default_tg_pt_gp);
2024                        spin_unlock(&lun->lun_tg_pt_gp_lock);
2025
2026                        return count;
2027                }
2028                __target_detach_tg_pt_gp(lun, tg_pt_gp);
2029                move = 1;
2030        }
2031
2032        __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
2033        spin_unlock(&lun->lun_tg_pt_gp_lock);
2034        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2035                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2036                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2037                tpg->se_tpg_tfo->tpg_get_tag(tpg),
2038                config_item_name(&lun->lun_group.cg_item),
2039                config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2040                tg_pt_gp_new->tg_pt_gp_id);
2041
2042        core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2043        return count;
2044}
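
/*
 * From userspace this store method is reached through the LUN's
 * "alua_tg_pt_gp" configfs attribute (assuming the standard layout below
 * /sys/kernel/config/target/). Illustrative shell usage:
 *
 *	echo example_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *	echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp   # back to default
 */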
2045
2046ssize_t core_alua_show_access_type(
2047        struct t10_alua_tg_pt_gp *tg_pt_gp,
2048        char *page)
2049{
2050        if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2051            (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2052                return sprintf(page, "Implicit and Explicit\n");
2053        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2054                return sprintf(page, "Implicit\n");
2055        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2056                return sprintf(page, "Explicit\n");
2057        else
2058                return sprintf(page, "None\n");
2059}
2060
2061ssize_t core_alua_store_access_type(
2062        struct t10_alua_tg_pt_gp *tg_pt_gp,
2063        const char *page,
2064        size_t count)
2065{
2066        unsigned long tmp;
2067        int ret;
2068
2069        ret = kstrtoul(page, 0, &tmp);
2070        if (ret < 0) {
2071                pr_err("Unable to extract alua_access_type\n");
2072                return ret;
2073        }
2074        if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2075                pr_err("Illegal value for alua_access_type:"
2076                                " %lu\n", tmp);
2077                return -EINVAL;
2078        }
2079        if (tmp == 3)
2080                tg_pt_gp->tg_pt_gp_alua_access_type =
2081                        TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2082        else if (tmp == 2)
2083                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2084        else if (tmp == 1)
2085                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2086        else
2087                tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2088
2089        return count;
2090}
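
/*
 * Quick reference for the value parsed above (written via the group's
 * "alua_access_type" attribute, assuming the standard configfs layout):
 *
 *	0 = None
 *	1 = Implicit
 *	2 = Explicit
 *	3 = Implicit and Explicit
 */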
2091
2092ssize_t core_alua_show_nonop_delay_msecs(
2093        struct t10_alua_tg_pt_gp *tg_pt_gp,
2094        char *page)
2095{
2096        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2097}
2098
2099ssize_t core_alua_store_nonop_delay_msecs(
2100        struct t10_alua_tg_pt_gp *tg_pt_gp,
2101        const char *page,
2102        size_t count)
2103{
2104        unsigned long tmp;
2105        int ret;
2106
2107        ret = kstrtoul(page, 0, &tmp);
2108        if (ret < 0) {
2109                pr_err("Unable to extract nonop_delay_msecs\n");
2110                return ret;
2111        }
2112        if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2113                pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2114                        " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2115                        ALUA_MAX_NONOP_DELAY_MSECS);
2116                return -EINVAL;
2117        }
2118        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2119
2120        return count;
2121}
2122
2123ssize_t core_alua_show_trans_delay_msecs(
2124        struct t10_alua_tg_pt_gp *tg_pt_gp,
2125        char *page)
2126{
2127        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2128}
2129
2130ssize_t core_alua_store_trans_delay_msecs(
2131        struct t10_alua_tg_pt_gp *tg_pt_gp,
2132        const char *page,
2133        size_t count)
2134{
2135        unsigned long tmp;
2136        int ret;
2137
2138        ret = kstrtoul(page, 0, &tmp);
2139        if (ret < 0) {
2140                pr_err("Unable to extract trans_delay_msecs\n");
2141                return ret;
2142        }
2143        if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2144                pr_err("Passed trans_delay_msecs: %lu, exceeds"
2145                        " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2146                        ALUA_MAX_TRANS_DELAY_MSECS);
2147                return -EINVAL;
2148        }
2149        tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2150
2151        return count;
2152}
2153
2154ssize_t core_alua_show_implicit_trans_secs(
2155        struct t10_alua_tg_pt_gp *tg_pt_gp,
2156        char *page)
2157{
2158        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2159}
2160
2161ssize_t core_alua_store_implicit_trans_secs(
2162        struct t10_alua_tg_pt_gp *tg_pt_gp,
2163        const char *page,
2164        size_t count)
2165{
2166        unsigned long tmp;
2167        int ret;
2168
2169        ret = kstrtoul(page, 0, &tmp);
2170        if (ret < 0) {
2171                pr_err("Unable to extract implicit_trans_secs\n");
2172                return ret;
2173        }
2174        if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2175                pr_err("Passed implicit_trans_secs: %lu, exceeds"
2176                        " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2177                        ALUA_MAX_IMPLICIT_TRANS_SECS);
2178                return -EINVAL;
2179        }
2180        tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2181
2182        return count;
2183}
2184
2185ssize_t core_alua_show_preferred_bit(
2186        struct t10_alua_tg_pt_gp *tg_pt_gp,
2187        char *page)
2188{
2189        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2190}
2191
2192ssize_t core_alua_store_preferred_bit(
2193        struct t10_alua_tg_pt_gp *tg_pt_gp,
2194        const char *page,
2195        size_t count)
2196{
2197        unsigned long tmp;
2198        int ret;
2199
2200        ret = kstrtoul(page, 0, &tmp);
2201        if (ret < 0) {
2202                pr_err("Unable to extract preferred ALUA value\n");
2203                return ret;
2204        }
2205        if ((tmp != 0) && (tmp != 1)) {
2206                pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2207                return -EINVAL;
2208        }
2209        tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2210
2211        return count;
2212}
2213
2214ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2215{
2216        return sprintf(page, "%d\n",
2217                atomic_read(&lun->lun_tg_pt_secondary_offline));
2218}
2219
2220ssize_t core_alua_store_offline_bit(
2221        struct se_lun *lun,
2222        const char *page,
2223        size_t count)
2224{
2225        /*
2226         * rcu_dereference_raw protected by se_lun->lun_group symlink
2227         * reference to se_device->dev_group.
2228         */
2229        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2230        unsigned long tmp;
2231        int ret;
2232
2233        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
2234            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2235                return -ENODEV;
2236
2237        ret = kstrtoul(page, 0, &tmp);
2238        if (ret < 0) {
2239                pr_err("Unable to extract alua_tg_pt_offline value\n");
2240                return ret;
2241        }
2242        if ((tmp != 0) && (tmp != 1)) {
2243                pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2244                                tmp);
2245                return -EINVAL;
2246        }
2247
2248        ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2249        if (ret < 0)
2250                return -EINVAL;
2251
2252        return count;
2253}
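
/*
 * Writing the per-LUN "alua_tg_pt_offline" attribute lands here and, via
 * core_alua_set_tg_pt_secondary_state(), performs an implicit secondary
 * state transition. Illustrative shell usage (assuming the standard
 * configfs layout):
 *
 *	echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */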
2254
2255ssize_t core_alua_show_secondary_status(
2256        struct se_lun *lun,
2257        char *page)
2258{
2259        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2260}
2261
2262ssize_t core_alua_store_secondary_status(
2263        struct se_lun *lun,
2264        const char *page,
2265        size_t count)
2266{
2267        unsigned long tmp;
2268        int ret;
2269
2270        ret = kstrtoul(page, 0, &tmp);
2271        if (ret < 0) {
2272                pr_err("Unable to extract alua_tg_pt_status\n");
2273                return ret;
2274        }
2275        if ((tmp != ALUA_STATUS_NONE) &&
2276            (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2277            (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2278                pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2279                                tmp);
2280                return -EINVAL;
2281        }
2282        lun->lun_tg_pt_secondary_stat = (int)tmp;
2283
2284        return count;
2285}
2286
2287ssize_t core_alua_show_secondary_write_metadata(
2288        struct se_lun *lun,
2289        char *page)
2290{
2291        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2292}
2293
2294ssize_t core_alua_store_secondary_write_metadata(
2295        struct se_lun *lun,
2296        const char *page,
2297        size_t count)
2298{
2299        unsigned long tmp;
2300        int ret;
2301
2302        ret = kstrtoul(page, 0, &tmp);
2303        if (ret < 0) {
2304                pr_err("Unable to extract alua_tg_pt_write_md\n");
2305                return ret;
2306        }
2307        if ((tmp != 0) && (tmp != 1)) {
2308                pr_err("Illegal value for alua_tg_pt_write_md:"
2309                                " %lu\n", tmp);
2310                return -EINVAL;
2311        }
2312        lun->lun_tg_pt_secondary_write_md = (int)tmp;
2313
2314        return count;
2315}
2316
2317int core_setup_alua(struct se_device *dev)
2318{
2319        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
2320            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2321                struct t10_alua_lu_gp_member *lu_gp_mem;
2322
2323                /*
2324                 * Associate this struct se_device with the default ALUA
2325                 * LUN Group.
2326                 */
2327                lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2328                if (IS_ERR(lu_gp_mem))
2329                        return PTR_ERR(lu_gp_mem);
2330
2331                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2332                __core_alua_attach_lu_gp_mem(lu_gp_mem,
2333                                default_lu_gp);
2334                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2335
2336                pr_debug("%s: Adding to default ALUA LU Group:"
2337                        " core/alua/lu_gps/default_lu_gp\n",
2338                        dev->transport->name);
2339        }
2340
2341        return 0;
2342}
2343