linux/drivers/target/target_core_alua.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
                                                 int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
                struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
                struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_lba_map *map;
        struct t10_alua_lba_map_member *map_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;

        if (cmd->data_length < 4) {
                pr_warn("REPORT REFERRALS allocation length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_CDB_FIELD;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        off = 4;
        spin_lock(&dev->t10_alua.lba_map_lock);
        if (list_empty(&dev->t10_alua.lba_map_list)) {
                spin_unlock(&dev->t10_alua.lba_map_lock);
                transport_kunmap_data_sg(cmd);

                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                            lba_map_list) {
                int desc_num = off + 3;
                int pg_num;

                off += 4;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
                off += 8;
                if (cmd->data_length > off)
                        put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
                off += 8;
                rd_len += 20;
                pg_num = 0;
                list_for_each_entry(map_mem, &map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        int alua_state = map_mem->lba_map_mem_alua_state;
                        int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

                        if (cmd->data_length > off)
                                buf[off] = alua_state & 0x0f;
                        off += 2;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id >> 8) & 0xff;
                        off++;
                        if (cmd->data_length > off)
                                buf[off] = (alua_pg_id & 0xff);
                        off++;
                        rd_len += 4;
                        pg_num++;
                }
                if (cmd->data_length > desc_num)
                        buf[desc_num] = pg_num;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);

        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be16(rd_len, &buf[2]);

        transport_kunmap_data_sg(cmd);

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}
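
/*
 * Sketch of the parameter data built above, with offsets inferred from the
 * code (sbc3r35 is authoritative):
 *
 *   bytes 0..3 : header, RETURN DATA LENGTH in bytes 2..3 (big-endian)
 *   per user data segment referral descriptor:
 *     byte 3       : number of target port group descriptors (pg_num)
 *     bytes 4..11  : FIRST USER DATA SEGMENT LBA (big-endian 64-bit)
 *     bytes 12..19 : LAST USER DATA SEGMENT LBA (big-endian 64-bit)
 *     then one 4-byte target port group descriptor per member:
 *       byte 0     : ALUA state in the low nibble
 *       bytes 2..3 : TARGET PORT GROUP identifier (big-endian)
 */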

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct se_lun *lun;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

        /*
         * Skip over RESERVED area to first Target port group descriptor
         * depending on the PARAMETER DATA FORMAT type.
         */
        if (ext_hdr != 0)
                off = 8;
        else
                off = 4;

        if (cmd->data_length < off) {
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
                return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
                 * based on tg_pt_gp_members count will fit into the response payload.
                 * Otherwise, bump rd_len to let the initiator know we have exceeded
                 * the allocation length and the response is truncated.
                 */
                if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
                     cmd->data_length) {
                        rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
                        continue;
                }
                /*
                 * PREF: Preferred target port bit, determine if this
                 * bit should be set for port group.
                 */
                if (tg_pt_gp->tg_pt_gp_pref)
                        buf[off] = 0x80;
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
                buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
                /*
                 * TARGET PORT GROUP
                 */
                put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
                off += 2;

                off++; /* Skip over Reserved */
                /*
                 * STATUS CODE
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
                /*
                 * Vendor Specific field
                 */
                buf[off++] = 0x00;
                /*
                 * TARGET PORT COUNT
                 */
                buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
                rd_len += 8;

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                        /*
                         * Start Target Port descriptor format
                         *
                         * See spc4r17 section 6.2.7 Table 247
                         */
                        off += 2; /* Skip over Obsolete */
                        /*
                         * Set RELATIVE TARGET PORT IDENTIFIER
                         */
                        put_unaligned_be16(lun->lun_rtpi, &buf[off]);
                        off += 2;
                        rd_len += 4;
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH in the header of the DataIN Payload
         */
        put_unaligned_be32(rd_len, &buf[0]);

        /*
         * Fill in the Extended header parameter data format if requested
         */
        if (ext_hdr != 0) {
                buf[4] = 0x10;
                /*
                 * Set the implicit transition time (in seconds) for the application
                 * client to use as a base for its transition timeout value.
                 *
                 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
                 * this CDB was received upon to determine this value individually
                 * for ALUA target port group.
                 */
                rcu_read_lock();
                tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
                if (tg_pt_gp)
                        buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
                rcu_read_unlock();
        }
        transport_kunmap_data_sg(cmd);

        target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
        return 0;
}
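
/*
 * Sketch of the parameter data returned above, with offsets inferred from
 * this function (spc4r17 section 6.27 is authoritative):
 *
 *   bytes 0..3 : RETURN DATA LENGTH (big-endian)
 *   bytes 4..7 : extended header only (cdb[1] bit 5 set): byte 4 = 0x10,
 *                byte 5 = IMPLICIT TRANSITION TIME in seconds
 *   per target port group descriptor (8 bytes + 4 per member port):
 *     byte 0     : PREF bit (0x80) | ASYMMETRIC ACCESS STATE
 *     byte 1     : supported ALUA access state bits
 *     bytes 2..3 : TARGET PORT GROUP identifier (big-endian)
 *     byte 5     : STATUS CODE
 *     byte 7     : TARGET PORT COUNT
 *     then one 4-byte target port descriptor per member, whose last two
 *     bytes hold the RELATIVE TARGET PORT IDENTIFIER
 */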

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct se_lun *l_lun = cmd->se_lun;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        unsigned char *buf;
        unsigned char *ptr;
        sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0, valid_states;
        u16 tg_pt_id, rtpi;

        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
                return TCM_INVALID_PARAMETER_LIST;
        }

        buf = transport_kmap_data_sg(cmd);
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
         * for the local tg_pt_gp.
         */
        rcu_read_lock();
        l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
        if (!l_tg_pt_gp) {
                rcu_read_unlock();
                pr_err("Unable to access l_lun->tg_pt_gp\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }

        if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
                rcu_read_unlock();
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICIT_ALUA is disabled\n");
                rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        rcu_read_unlock();

        ptr = &buf[4]; /* Skip over RESERVED area in header */

        while (len < cmd->data_length) {
                bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
                 * the state is a primary or secondary target port asymmetric
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state, valid_states,
                                                &primary, 1);
                if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
                         * access states or attempts to establish an
                         * unsupported target port asymmetric access state,
                         * then the command shall be terminated with CHECK
                         * CONDITION status, with the sense key set to ILLEGAL
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
                        goto out;
                }

                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
                 * then the TARGET PORT GROUP OR TARGET PORT field specifies
                 * a primary target port group for which the primary target
                 * port asymmetric access state shall be changed. If the
                 * ASYMMETRIC ACCESS STATE field specifies a secondary target
                 * port asymmetric access state, then the TARGET PORT GROUP OR
                 * TARGET PORT field specifies the relative target port
                 * identifier (see 3.1.120) of the target port for which the
                 * secondary target port asymmetric access state shall be
                 * changed.
                 */
                if (primary) {
                        tg_pt_id = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
                                        &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;

                                if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
                                        continue;

                                atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

                                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                                if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_lun, nacl,
                                                alua_access_state, 1))
                                        found = true;

                                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                break;
                        }
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        struct se_lun *lun;

                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
                         * the Target Port in question for the incoming
                         * SET_TARGET_PORT_GROUPS op.
                         */
                        rtpi = get_unaligned_be16(ptr + 2);
                        /*
                         * Locate the matching relative target port identifier
                         * for the struct se_device storage object.
                         */
                        spin_lock(&dev->se_port_lock);
                        list_for_each_entry(lun, &dev->dev_sep_list,
                                                        lun_dev_link) {
                                if (lun->lun_rtpi != rtpi)
                                        continue;

                                // XXX: racy unlock
                                spin_unlock(&dev->se_port_lock);

                                if (!core_alua_set_tg_pt_secondary_state(
                                                lun, 1, 1))
                                        found = true;

                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
                }

                if (!found) {
                        rc = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }

                ptr += 4;
                len += 4;
        }

out:
        transport_kunmap_data_sg(cmd);
        if (!rc)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return rc;
}
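
/*
 * Sketch of the parameter list parsed above, derived from the pointer
 * arithmetic in this function (spc4r17 section 6.35 is authoritative):
 *
 *   bytes 0..3 : reserved header
 *   then one 4-byte set target port groups descriptor per entry:
 *     byte 0     : ASYMMETRIC ACCESS STATE in the low nibble
 *     bytes 2..3 : TARGET PORT GROUP (primary states) or RELATIVE TARGET
 *                  PORT IDENTIFIER (secondary states), big-endian
 */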

static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
        int nonop_delay_msecs)
{
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
         * later to determine if processing of this cmd needs to be
         * temporarily delayed for the Active/NonOptimized primary access state.
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline sense_reason_t core_alua_state_lba_dependent(
        struct se_cmd *cmd,
        u16 tg_pt_gp_id)
{
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;

        /* Only need to check for cdb actually containing LBAs */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
                return 0;

        spin_lock(&dev->t10_alua.lba_map_lock);
        segment_size = dev->t10_alua.lba_map_segment_size;
        segment_mult = dev->t10_alua.lba_map_segment_multiplier;
        sectors = cmd->data_length / dev->dev_attrib.block_size;

        lba = cmd->t_task_lba;
        while (lba < cmd->t_task_lba + sectors) {
                struct t10_alua_lba_map *cur_map = NULL, *map;
                struct t10_alua_lba_map_member *map_mem;

                list_for_each_entry(map, &dev->t10_alua.lba_map_list,
                                    lba_map_list) {
                        u64 start_lba, last_lba;
                        u64 first_lba = map->lba_map_first_lba;

                        if (segment_mult) {
                                u64 tmp = lba;
                                start_lba = do_div(tmp, segment_size * segment_mult);

                                last_lba = first_lba + segment_size - 1;
                                if (start_lba >= first_lba &&
                                    start_lba <= last_lba) {
                                        lba += segment_size;
                                        cur_map = map;
                                        break;
                                }
                        } else {
                                last_lba = map->lba_map_last_lba;
                                if (lba >= first_lba && lba <= last_lba) {
                                        lba = last_lba + 1;
                                        cur_map = map;
                                        break;
                                }
                        }
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
                        return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
                        if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
                                continue;
                        switch (map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                return TCM_ALUA_TG_PT_STANDBY;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
                                return TCM_ALUA_TG_PT_UNAVAILABLE;
                        default:
                                break;
                        }
                }
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        return 0;
}
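
/*
 * Worked example of the segment lookup above, with made-up numbers: with
 * segment_size = 0x1000 and segment_mult = 2, an I/O at lba = 0x2345 gives
 * start_lba = 0x2345 % (0x1000 * 2) = 0x345, so a map covering
 * first_lba = 0x0 .. last_lba = 0xfff matches and the scan advances lba by
 * one segment_size. Without a multiplier, the LBA is compared directly
 * against each map's [first_lba, last_lba] range.
 */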

static inline sense_reason_t core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
         * spc4r17 section 5.9.2.4.4
         */
        switch (cdb[0]) {
        case INQUIRY:
        case LOG_SELECT:
        case LOG_SENSE:
        case MODE_SELECT:
        case MODE_SENSE:
        case REPORT_LUNS:
        case RECEIVE_DIAGNOSTIC:
        case SEND_DIAGNOSTIC:
        case READ_CAPACITY:
                return 0;
        case SERVICE_ACTION_IN_16:
                switch (cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
                        return TCM_ALUA_TG_PT_STANDBY;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        return TCM_ALUA_TG_PT_STANDBY;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        return TCM_ALUA_TG_PT_STANDBY;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
        case PERSISTENT_RESERVE_OUT:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                return TCM_ALUA_TG_PT_STANDBY;
        }

        return 0;
}

static inline sense_reason_t core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
         * spc4r17 section 5.9.2.4.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
                        return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                return TCM_ALUA_TG_PT_UNAVAILABLE;
        }

        return 0;
}

static inline sense_reason_t core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
{
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
         * spc4r17 section 5.9.2.5
         */
        switch (cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
                return 0;
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
                        return TCM_ALUA_STATE_TRANSITION;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
                return TCM_ALUA_STATE_TRANSITION;
        }

        return 0;
}

/*
 * Returns TCM_NO_SENSE (0) when the command may proceed, a TCM_ALUA_*
 * sense_reason_t when the LUN is not accessible in the current ALUA
 * state, or TCM_INVALID_CDB_FIELD for an unknown access state.
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;
        u16 tg_pt_gp_id;
        sense_reason_t rc = TCM_NO_SENSE;

        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return 0;

        /*
         * First, check for a struct se_port specific secondary ALUA target port
         * access state: OFFLINE
         */
        if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
                return TCM_ALUA_OFFLINE;
        }
        rcu_read_lock();
        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
        if (!tg_pt_gp) {
                rcu_read_unlock();
                return 0;
        }

        out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
        tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
        rcu_read_unlock();
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
         * statement so the compiler knows explicitly to check this case first.
         * For the Optimized ALUA access state case, we want to process the
         * incoming fabric cmd ASAP.
         */
        if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
                return 0;

        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                rc = core_alua_state_standby(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                rc = core_alua_state_unavailable(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                rc = core_alua_state_transition(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
         */
        case ALUA_ACCESS_STATE_OFFLINE:
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
                rc = TCM_INVALID_CDB_FIELD;
        }

        if (rc && rc != TCM_INVALID_CDB_FIELD) {
                pr_debug("[%s]: ALUA TG Port not available, "
                        "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
                        cmd->se_tfo->fabric_name, rc);
        }

        return rc;
}
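
/*
 * A quick map of the dispatch above: Active/Optimized returns immediately;
 * Active/NonOptimized is allowed through but flags the command for an
 * optional delay in core_alua_check_nonop_delay(); Standby, Unavailable,
 * Transitioning and LBA-dependent consult the per-state CDB whitelists
 * implemented in the core_alua_state_*() helpers and fail anything else
 * with the corresponding TCM_ALUA_* sense_reason_t.
 */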

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
        /*
         * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
         * defined as primary target port asymmetric access states.
         */
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                if (!(valid & ALUA_AO_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                if (!(valid & ALUA_AN_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_STANDBY:
                if (!(valid & ALUA_S_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                if (!(valid & ALUA_U_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                if (!(valid & ALUA_LBD_SUP))
                        goto not_supported;
                *primary = 1;
                break;
        case ALUA_ACCESS_STATE_OFFLINE:
                /*
                 * OFFLINE state is defined as a secondary target port
                 * asymmetric access state.
                 */
                if (!(valid & ALUA_O_SUP))
                        goto not_supported;
                *primary = 0;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
                if (!(valid & ALUA_T_SUP) || explicit)
                        /*
                         * Transitioning is set internally and by tcmu daemon,
                         * and cannot be selected through a STPG.
                         */
                        goto not_supported;
                *primary = 0;
                break;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
                return TCM_INVALID_PARAMETER_LIST;
        }

        return 0;

not_supported:
        pr_err("ALUA access state %s not supported\n",
               core_alua_dump_state(state));
        return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
                return "Active/Optimized";
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
                return "Active/NonOptimized";
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
                return "LBA Dependent";
        case ALUA_ACCESS_STATE_STANDBY:
                return "Standby";
        case ALUA_ACCESS_STATE_UNAVAILABLE:
                return "Unavailable";
        case ALUA_ACCESS_STATE_OFFLINE:
                return "Offline";
        case ALUA_ACCESS_STATE_TRANSITION:
                return "Transitioning";
        default:
                return "Unknown";
        }

        return NULL;
}

char *core_alua_dump_status(int status)
{
        switch (status) {
        case ALUA_STATUS_NONE:
                return "None";
        case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
                return "Altered by Explicit STPG";
        case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
                return "Altered by Implicit ALUA";
        default:
                return "Unknown";
        }

        return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
        struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
                return 0;
        /*
         * The ALUA Active/NonOptimized access state delay can be disabled
         * via configfs with a value of zero
         */
        if (!cmd->alua_nonop_delay)
                return 0;
        /*
         * struct se_cmd->alua_nonop_delay gets set by a target port group
         * defined interval in core_alua_state_nonoptimized()
         */
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
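
/*
 * Example of tuning this from userspace (path is illustrative of a typical
 * configfs layout): the delay comes from the target port group's
 * nonop_delay_msecs attribute, e.g.
 *
 *   echo 100 > /sys/kernel/config/target/core/$HBA/$DEV/alua/$TG_PT_GP/nonop_delay_msecs
 *
 * and writing 0 disables the Active/NonOptimized delay entirely.
 */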

static int core_alua_write_tpg_metadata(
        const char *path,
        unsigned char *md_buf,
        u32 md_buf_len)
{
        struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        loff_t pos = 0;
        int ret;

        if (IS_ERR(file)) {
                pr_err("filp_open(%s) for ALUA metadata failed\n", path);
                return -ENODEV;
        }
        ret = kernel_write(file, md_buf, md_buf_len, &pos);
        if (ret < 0)
                pr_err("Error writing ALUA metadata file: %s\n", path);
        fput(file);
        return (ret < 0) ? -EIO : 0;
}

static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        unsigned char *md_buf;
        struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char *path;
        int len, rc;

        lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                return -ENOMEM;
        }

        len = snprintf(md_buf, ALUA_MD_BUF_LEN,
                        "tg_pt_gp_id=%hu\n"
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
                        tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);

        rc = -ENOMEM;
        path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
                        &wwn->unit_serial[0],
                        config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
        if (path) {
                rc = core_alua_write_tpg_metadata(path, md_buf, len);
                kfree(path);
        }
        kfree(md_buf);
        return rc;
}
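
/*
 * With the format string above, the primary metadata file written under
 * $db_root/alua/tpgs_$T10_UNIT_SERIAL/$TG_PT_GP_NAME looks like this
 * (values illustrative only):
 *
 *   tg_pt_gp_id=1
 *   alua_access_state=0x00
 *   alua_access_status=0x02
 */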

static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
        struct se_dev_entry *se_deve;
        struct se_lun *lun;
        struct se_lun_acl *lacl;

        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
                                lun_tg_pt_gp_link) {
                /*
                 * After an implicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition for the initiator port associated with every I_T
                 * nexus with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED.
                 *
                 * After an explicit target port asymmetric access state
                 * change, a device server shall establish a unit attention
                 * condition with the additional sense code set to ASYMMETRIC
                 * ACCESS STATE CHANGED for the initiator port associated with
                 * every I_T nexus other than the I_T nexus on which the SET
                 * TARGET PORT GROUPS command was received.
                 */
                if (!percpu_ref_tryget_live(&lun->lun_ref))
                        continue;
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

                spin_lock(&lun->lun_deve_lock);
                list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
                        lacl = rcu_dereference_check(se_deve->se_lun_acl,
                                        lockdep_is_held(&lun->lun_deve_lock));

                        /*
                         * spc4r37 p.242:
                         * After an explicit target port asymmetric access
                         * state change, a device server shall establish a
                         * unit attention condition with the additional sense
                         * code set to ASYMMETRIC ACCESS STATE CHANGED for
                         * the initiator port associated with every I_T nexus
                         * other than the I_T nexus on which the SET TARGET
                         * PORT GROUPS command was received.
                         */
                        if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
                           (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                                continue;

                        /*
                         * se_deve->se_lun_acl pointer may be NULL for an
                         * entry created without explicit Node+MappedLUN ACLs
                         */
                        if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
                            (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
                                continue;

                        core_scsi3_ua_allocate(se_deve, 0x2A,
                                ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
                }
                spin_unlock(&lun->lun_deve_lock);

                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
                percpu_ref_put(&lun->lun_ref);
        }
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
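
/*
 * Initiators see the unit attentions queued above as a CHECK CONDITION on
 * a later command through the affected I_T nexus, carrying ASC/ASCQ
 * 0x2A/0x06 (ASYMMETRIC ACCESS STATE CHANGED), which multipath stacks can
 * use as a cue to re-issue REPORT TARGET PORT GROUPS.
 */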

static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
{
        int prev_state;

        mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        /* Nothing to be done here */
        if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
        }

        if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return -EAGAIN;
        }

        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
        prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        core_alua_queue_state_change_ua(tg_pt_gp);

        if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
                mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
        }

        /*
         * Check for the optional ALUA primary state transition delay
         */
        if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

        /*
         * Set the current primary ALUA access state to the requested new state
         */
        tg_pt_gp->tg_pt_gp_alua_access_state = new_state;

        /*
         * Update the ALUA metadata buf that has been allocated in
         * core_alua_do_port_transition(), this metadata will be written
         * to struct file.
         *
         * Note that there is the case where we do not want to update the
         * metadata when the saved metadata is being parsed in userspace
         * when setting the existing port access state and access status.
         *
         * Also note that the failure to write out the ALUA metadata to
         * struct file does NOT affect the actual ALUA transition.
         */
        if (tg_pt_gp->tg_pt_gp_write_metadata) {
                core_alua_update_tpg_primary_metadata(tg_pt_gp);
        }

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " from primary access state %s to %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id,
                core_alua_dump_state(prev_state),
                core_alua_dump_state(new_state));

        core_alua_queue_state_change_ua(tg_pt_gp);

        mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        return 0;
}
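
/*
 * In short, the transition above runs in two steps under the transition
 * mutex: the group is first parked in ALUA_ACCESS_STATE_TRANSITION with a
 * unit attention queued, then after the optional trans_delay_msecs sleep
 * the final state is committed, metadata is optionally persisted, and a
 * second unit attention announces the new state.
 */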

int core_alua_do_port_transition(
        struct t10_alua_tg_pt_gp *l_tg_pt_gp,
        struct se_device *l_dev,
        struct se_lun *l_lun,
        struct se_node_acl *l_nacl,
        int new_state,
        int explicit)
{
        struct se_device *dev;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int primary, valid_states, rc = 0;

        if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
                return -ENODEV;

        valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
        if (core_alua_check_transition(new_state, valid_states, &primary,
                                       explicit) != 0)
                return -EINVAL;

        local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
        spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
        lu_gp = local_lu_gp_mem->lu_gp;
        atomic_inc(&lu_gp->lu_gp_ref_cnt);
        spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
        /*
         * For storage objects that are members of the 'default_lu_gp',
         * we only do transition on the passed *l_tg_pt_gp, and not
         * on all of the matching target port groups IDs in default_lu_gp.
         */
        if (!lu_gp->lu_gp_id) {
                /*
                 * core_alua_do_transition_tg_pt() will always return
                 * success.
                 */
                l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
                                                   new_state, explicit);
                atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
                return rc;
        }
        /*
         * For all other LU groups aside from 'default_lu_gp', walk all of
         * the associated storage objects looking for a matching target port
         * group ID from the local target port group.
         */
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
                                lu_gp_mem_list) {

                dev = lu_gp_mem->lu_gp_mem_dev;
                atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
                spin_unlock(&lu_gp->lu_gp_lock);

                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
                                &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {

                        if (!tg_pt_gp->tg_pt_gp_valid_id)
                                continue;
                        /*
                         * If the target port asymmetric access state is
                         * changed for any target port group accessible via
                         * a logical unit within a LU group, the target port
                         * asymmetric access states for the same target port
                         * group accessible via other logical units in that
                         * LU group will also change.
                         */
                        if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
                                continue;

                        if (l_tg_pt_gp == tg_pt_gp) {
                                tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
                                tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
                        } else {
                                tg_pt_gp->tg_pt_gp_alua_lun = NULL;
                                tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
                        }
                        atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
                         */
                        rc = core_alua_do_transition_tg_pt(tg_pt_gp,
                                        new_state, explicit);

                        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        if (rc)
                                break;
                }
                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
        }
        spin_unlock(&lu_gp->lu_gp_lock);

        if (!rc) {
                pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
                         " Group IDs: %hu %s transition to primary state: %s\n",
                         config_item_name(&lu_gp->lu_gp_group.cg_item),
                         l_tg_pt_gp->tg_pt_gp_id,
                         (explicit) ? "explicit" : "implicit",
                         core_alua_dump_state(new_state));
        }

        atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
        return rc;
}

static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
        struct se_portal_group *se_tpg = lun->lun_tpg;
        unsigned char *md_buf;
        char *path;
        int len, rc;

        mutex_lock(&lun->lun_tg_pt_md_mutex);

        md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
        if (!md_buf) {
                pr_err("Unable to allocate buf for ALUA metadata\n");
                rc = -ENOMEM;
                goto out_unlock;
        }

        len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
                        "alua_tg_pt_status=0x%02x\n",
                        atomic_read(&lun->lun_tg_pt_secondary_offline),
                        lun->lun_tg_pt_secondary_stat);

        if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
                path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
                                db_root, se_tpg->se_tpg_tfo->fabric_name,
                                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                                se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
                                lun->unpacked_lun);
        } else {
                path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
                                db_root, se_tpg->se_tpg_tfo->fabric_name,
                                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
                                lun->unpacked_lun);
        }
        if (!path) {
                rc = -ENOMEM;
                goto out_free;
        }

        rc = core_alua_write_tpg_metadata(path, md_buf, len);
        kfree(path);
out_free:
        kfree(md_buf);
out_unlock:
        mutex_unlock(&lun->lun_tg_pt_md_mutex);
        return rc;
}
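
/*
 * Example of the resulting secondary metadata file (illustrative values),
 * written under $db_root/alua/$FABRIC/$WWN+$TAG/lun_$N (or without the
 * +$TAG component when the fabric has no tpg_get_tag()) per the
 * kasprintf() calls above:
 *
 *   alua_tg_pt_offline=1
 *   alua_tg_pt_status=0x02
 */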

static int core_alua_set_tg_pt_secondary_state(
        struct se_lun *lun,
        int explicit,
        int offline)
{
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int trans_delay_msecs;

        rcu_read_lock();
        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
        if (!tg_pt_gp) {
                rcu_read_unlock();
                pr_err("Unable to complete secondary state"
                                " transition\n");
                return -EINVAL;
        }
        trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
        /*
         * Set the secondary ALUA target port access state to OFFLINE
         * or release the previously secondary state for struct se_lun
         */
        if (offline)
                atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
        else
                atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

        lun->lun_tg_pt_secondary_stat = (explicit) ?
                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                        ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

        pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
                " to secondary access state: %s\n", (explicit) ? "explicit" :
                "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

        rcu_read_unlock();
        /*
         * Do the optional transition delay after we set the secondary
         * ALUA access state.
         */
        if (trans_delay_msecs != 0)
                msleep_interruptible(trans_delay_msecs);
        /*
         * See if we need to update the ALUA fabric port metadata for
         * secondary state and status
         */
        if (lun->lun_tg_pt_secondary_write_md)
                core_alua_update_tpg_secondary_metadata(lun);

        return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
                           u64 first_lba, u64 last_lba)
{
        struct t10_alua_lba_map *lba_map;

        lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
        if (!lba_map) {
                pr_err("Unable to allocate struct t10_alua_lba_map\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
        lba_map->lba_map_first_lba = first_lba;
        lba_map->lba_map_last_lba = last_lba;

        list_add_tail(&lba_map->lba_map_list, list);
        return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
                               int pg_id, int state)
{
        struct t10_alua_lba_map_member *lba_map_mem;

        list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
                            lba_map_mem_list) {
                if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
                        pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
                        return -EINVAL;
                }
        }

        lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
        if (!lba_map_mem) {
                pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
                return -ENOMEM;
        }
        lba_map_mem->lba_map_mem_alua_state = state;
        lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

        list_add_tail(&lba_map_mem->lba_map_mem_list,
                      &lba_map->lba_map_mem_list);
        return 0;
}
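
/*
 * Minimal sketch of how the two helpers above combine when building a
 * referral map list (error handling elided; values are illustrative):
 *
 *   LIST_HEAD(lba_list);
 *   struct t10_alua_lba_map *map;
 *
 *   map = core_alua_allocate_lba_map(&lba_list, 0, 0xffff);
 *   core_alua_allocate_lba_map_mem(map, 1,
 *                                  ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 *   core_alua_allocate_lba_map_mem(map, 2,
 *                                  ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED);
 */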

void
core_alua_free_lba_map(struct list_head *lba_list)
{
        struct t10_alua_lba_map *lba_map, *lba_map_tmp;
        struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

        list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
                                 lba_map_list) {
                list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
                                         &lba_map->lba_map_mem_list,
                                         lba_map_mem_list) {
                        list_del(&lba_map_mem->lba_map_mem_list);
                        kmem_cache_free(t10_alua_lba_map_mem_cache,
                                        lba_map_mem);
                }
                list_del(&lba_map->lba_map_list);
                kmem_cache_free(t10_alua_lba_map_cache, lba_map);
        }
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
                      int segment_size, int segment_mult)
{
        struct list_head old_lba_map_list;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int activate = 0, supported;

        INIT_LIST_HEAD(&old_lba_map_list);
        spin_lock(&dev->t10_alua.lba_map_lock);
        dev->t10_alua.lba_map_segment_size = segment_size;
        dev->t10_alua.lba_map_segment_multiplier = segment_mult;
        list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
        if (lba_map_list) {
                list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
                activate = 1;
        }
        spin_unlock(&dev->t10_alua.lba_map_lock);
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                            tg_pt_gp_list) {

                if (!tg_pt_gp->tg_pt_gp_valid_id)
                        continue;
                supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
                if (activate)
                        supported |= ALUA_LBD_SUP;
                else
                        supported &= ~ALUA_LBD_SUP;
                tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
        }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        core_alua_free_lba_map(&old_lba_map_list);
}
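
/*
 * Callers hand core_alua_set_lba_map() a fully built list such as the one
 * sketched above; passing a NULL lba_map_list instead tears down the
 * referral configuration. Either way, ALUA_LBD_SUP is added to or removed
 * from every valid target port group's supported-states mask to match.
 */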
1366
1367struct t10_alua_lu_gp *
1368core_alua_allocate_lu_gp(const char *name, int def_group)
1369{
1370        struct t10_alua_lu_gp *lu_gp;
1371
1372        lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1373        if (!lu_gp) {
1374                pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1375                return ERR_PTR(-ENOMEM);
1376        }
1377        INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1378        INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1379        spin_lock_init(&lu_gp->lu_gp_lock);
1380        atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1381
1382        if (def_group) {
1383                lu_gp->lu_gp_id = alua_lu_gps_counter++;
1384                lu_gp->lu_gp_valid_id = 1;
1385                alua_lu_gps_count++;
1386        }
1387
1388        return lu_gp;
1389}
1390
1391int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1392{
1393        struct t10_alua_lu_gp *lu_gp_tmp;
1394        u16 lu_gp_id_tmp;
1395        /*
1396         * The lu_gp->lu_gp_id may only be set once.
1397         */
1398        if (lu_gp->lu_gp_valid_id) {
1399                pr_warn("ALUA LU Group already has a valid ID,"
1400                        " ignoring request\n");
1401                return -EINVAL;
1402        }
1403
1404        spin_lock(&lu_gps_lock);
1405        if (alua_lu_gps_count == 0x0000ffff) {
1406                pr_err("Maximum ALUA alua_lu_gps_count:"
1407                                " 0x0000ffff reached\n");
1408                spin_unlock(&lu_gps_lock);
1409                kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1410                return -ENOSPC;
1411        }
1412again:
1413        lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1414                                alua_lu_gps_counter++;
1415
1416        list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1417                if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1418                        if (!lu_gp_id)
1419                                goto again;
1420
1421                        pr_warn("ALUA Logical Unit Group ID: %hu"
1422                                " already exists, ignoring request\n",
1423                                lu_gp_id);
1424                        spin_unlock(&lu_gps_lock);
1425                        return -EINVAL;
1426                }
1427        }
1428
1429        lu_gp->lu_gp_id = lu_gp_id_tmp;
1430        lu_gp->lu_gp_valid_id = 1;
1431        list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1432        alua_lu_gps_count++;
1433        spin_unlock(&lu_gps_lock);
1434
1435        return 0;
1436}
1437
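/*
 * Usage sketch (illustrative, error handling abbreviated): a typical caller
 * pairs allocation with ID assignment, where an ID of 0 requests the next
 * generated value:
 *
 *	lu_gp = core_alua_allocate_lu_gp("some_lu_gp", 0);
 *	if (IS_ERR(lu_gp))
 *		return PTR_ERR(lu_gp);
 *	if (core_alua_set_lu_gp_id(lu_gp, 0) < 0)
 *		return -EINVAL;
 *
 * Note that core_alua_set_lu_gp_id() frees the lu_gp itself on the 0xffff
 * limit path above, so the caller must not free it again in that case.
 */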
1438static struct t10_alua_lu_gp_member *
1439core_alua_allocate_lu_gp_mem(struct se_device *dev)
1440{
1441        struct t10_alua_lu_gp_member *lu_gp_mem;
1442
1443        lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1444        if (!lu_gp_mem) {
1445                pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1446                return ERR_PTR(-ENOMEM);
1447        }
1448        INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1449        spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1450        atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1451
1452        lu_gp_mem->lu_gp_mem_dev = dev;
1453        dev->dev_alua_lu_gp_mem = lu_gp_mem;
1454
1455        return lu_gp_mem;
1456}
1457
1458void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1459{
1460        struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1461        /*
1462         * Once we have reached this point, config_item_put() has
1463         * already been called from target_core_alua_drop_lu_gp().
1464         *
1465         * Here, we remove the *lu_gp from the global list so that
1466         * no associations can be made while we are releasing
1467         * struct t10_alua_lu_gp.
1468         */
1469        spin_lock(&lu_gps_lock);
1470        list_del(&lu_gp->lu_gp_node);
1471        alua_lu_gps_count--;
1472        spin_unlock(&lu_gps_lock);
1473        /*
1474         * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1475         * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1476         * released with core_alua_put_lu_gp_from_name()
1477         */
1478        while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1479                cpu_relax();
1480        /*
1481         * Release reference to struct t10_alua_lu_gp * from all associated
1482         * struct se_device.
1483         */
1484        spin_lock(&lu_gp->lu_gp_lock);
1485        list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1486                                &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1487                if (lu_gp_mem->lu_gp_assoc) {
1488                        list_del(&lu_gp_mem->lu_gp_mem_list);
1489                        lu_gp->lu_gp_members--;
1490                        lu_gp_mem->lu_gp_assoc = 0;
1491                }
1492                spin_unlock(&lu_gp->lu_gp_lock);
1493                /*
1495                 * lu_gp_mem is associated with a single
1496                 * struct se_device->dev_alua_lu_gp_mem, and is released when
1497                 * struct se_device is released via core_alua_free_lu_gp_mem().
1498                 *
1499                 * If the passed lu_gp does NOT match the default_lu_gp, assume
1500                 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1501                 */
1502                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1503                if (lu_gp != default_lu_gp)
1504                        __core_alua_attach_lu_gp_mem(lu_gp_mem,
1505                                        default_lu_gp);
1506                else
1507                        lu_gp_mem->lu_gp = NULL;
1508                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1509
1510                spin_lock(&lu_gp->lu_gp_lock);
1511        }
1512        spin_unlock(&lu_gp->lu_gp_lock);
1513
1514        kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1515}
1516
1517void core_alua_free_lu_gp_mem(struct se_device *dev)
1518{
1519        struct t10_alua_lu_gp *lu_gp;
1520        struct t10_alua_lu_gp_member *lu_gp_mem;
1521
1522        lu_gp_mem = dev->dev_alua_lu_gp_mem;
1523        if (!lu_gp_mem)
1524                return;
1525
1526        while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1527                cpu_relax();
1528
1529        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1530        lu_gp = lu_gp_mem->lu_gp;
1531        if (lu_gp) {
1532                spin_lock(&lu_gp->lu_gp_lock);
1533                if (lu_gp_mem->lu_gp_assoc) {
1534                        list_del(&lu_gp_mem->lu_gp_mem_list);
1535                        lu_gp->lu_gp_members--;
1536                        lu_gp_mem->lu_gp_assoc = 0;
1537                }
1538                spin_unlock(&lu_gp->lu_gp_lock);
1539                lu_gp_mem->lu_gp = NULL;
1540        }
1541        spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1542
1543        kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1544}
1545
1546struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1547{
1548        struct t10_alua_lu_gp *lu_gp;
1549        struct config_item *ci;
1550
1551        spin_lock(&lu_gps_lock);
1552        list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1553                if (!lu_gp->lu_gp_valid_id)
1554                        continue;
1555                ci = &lu_gp->lu_gp_group.cg_item;
1556                if (!strcmp(config_item_name(ci), name)) {
1557                        atomic_inc(&lu_gp->lu_gp_ref_cnt);
1558                        spin_unlock(&lu_gps_lock);
1559                        return lu_gp;
1560                }
1561        }
1562        spin_unlock(&lu_gps_lock);
1563
1564        return NULL;
1565}
1566
1567void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1568{
1569        spin_lock(&lu_gps_lock);
1570        atomic_dec(&lu_gp->lu_gp_ref_cnt);
1571        spin_unlock(&lu_gps_lock);
1572}
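/*
 * Usage sketch (illustrative): the name-based lookup takes a reference
 * under lu_gps_lock that must be dropped with the matching put:
 *
 *	lu_gp = core_alua_get_lu_gp_by_name(strstrip(buf));
 *	if (!lu_gp)
 *		return -ENODEV;
 *	...
 *	core_alua_put_lu_gp_from_name(lu_gp);
 */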
1573
1574/*
1575 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1576 */
1577void __core_alua_attach_lu_gp_mem(
1578        struct t10_alua_lu_gp_member *lu_gp_mem,
1579        struct t10_alua_lu_gp *lu_gp)
1580{
1581        spin_lock(&lu_gp->lu_gp_lock);
1582        lu_gp_mem->lu_gp = lu_gp;
1583        lu_gp_mem->lu_gp_assoc = 1;
1584        list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1585        lu_gp->lu_gp_members++;
1586        spin_unlock(&lu_gp->lu_gp_lock);
1587}
1588
1589/*
1590 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
1591 */
1592void __core_alua_drop_lu_gp_mem(
1593        struct t10_alua_lu_gp_member *lu_gp_mem,
1594        struct t10_alua_lu_gp *lu_gp)
1595{
1596        spin_lock(&lu_gp->lu_gp_lock);
1597        list_del(&lu_gp_mem->lu_gp_mem_list);
1598        lu_gp_mem->lu_gp = NULL;
1599        lu_gp_mem->lu_gp_assoc = 0;
1600        lu_gp->lu_gp_members--;
1601        spin_unlock(&lu_gp->lu_gp_lock);
1602}
1603
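/*
 * Both helpers above expect lu_gp_mem->lu_gp_mem_lock to be held, mirroring
 * core_setup_alua() below. A move between groups (illustrative, names
 * assumed) would look like:
 *
 *	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 *	if (lu_gp_mem->lu_gp_assoc)
 *		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp_mem->lu_gp);
 *	__core_alua_attach_lu_gp_mem(lu_gp_mem, new_lu_gp);
 *	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 */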
1604struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1605                const char *name, int def_group)
1606{
1607        struct t10_alua_tg_pt_gp *tg_pt_gp;
1608
1609        tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1610        if (!tg_pt_gp) {
1611                pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1612                return NULL;
1613        }
1614        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1615        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1616        mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1617        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1618        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1619        tg_pt_gp->tg_pt_gp_dev = dev;
1620        tg_pt_gp->tg_pt_gp_alua_access_state =
1621                        ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1622        /*
1623         * Enable both explicit and implicit ALUA support by default
1624         */
1625        tg_pt_gp->tg_pt_gp_alua_access_type =
1626                        TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1627        /*
1628         * Set the default Active/NonOptimized Delay in milliseconds
1629         */
1630        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1631        tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1632        tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1633
1634        /*
1635         * Enable all supported states
1636         */
1637        tg_pt_gp->tg_pt_gp_alua_supported_states =
1638            ALUA_T_SUP | ALUA_O_SUP |
1639            ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1640
1641        if (def_group) {
1642                spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1643                tg_pt_gp->tg_pt_gp_id =
1644                                dev->t10_alua.alua_tg_pt_gps_counter++;
1645                tg_pt_gp->tg_pt_gp_valid_id = 1;
1646                dev->t10_alua.alua_tg_pt_gps_count++;
1647                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1648                              &dev->t10_alua.tg_pt_gps_list);
1649                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1650        }
1651
1652        return tg_pt_gp;
1653}
1654
1655int core_alua_set_tg_pt_gp_id(
1656        struct t10_alua_tg_pt_gp *tg_pt_gp,
1657        u16 tg_pt_gp_id)
1658{
1659        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1660        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1661        u16 tg_pt_gp_id_tmp;
1662
1663        /*
1664         * The tg_pt_gp->tg_pt_gp_id may only be set once.
1665         */
1666        if (tg_pt_gp->tg_pt_gp_valid_id) {
1667                pr_warn("ALUA TG PT Group already has a valid ID,"
1668                        " ignoring request\n");
1669                return -EINVAL;
1670        }
1671
1672        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1673        if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1674                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1675                        " 0x0000ffff reached\n");
1676                spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1677                return -ENOSPC;
1678        }
1679again:
1680        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1681                        dev->t10_alua.alua_tg_pt_gps_counter++;
1682
1683        list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1684                        tg_pt_gp_list) {
1685                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1686                        if (!tg_pt_gp_id)
1687                                goto again;
1688
1689                        pr_err("ALUA Target Port Group ID: %hu already"
1690                                " exists, ignoring request\n", tg_pt_gp_id);
1691                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1692                        return -EINVAL;
1693                }
1694        }
1695
1696        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1697        tg_pt_gp->tg_pt_gp_valid_id = 1;
1698        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1699                        &dev->t10_alua.tg_pt_gps_list);
1700        dev->t10_alua.alua_tg_pt_gps_count++;
1701        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1702
1703        return 0;
1704}
1705
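/*
 * Usage sketch (illustrative): as with LU groups, allocation and ID
 * assignment are separate steps unless def_group was set, and an ID of 0
 * requests the next generated value:
 *
 *	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "some_tg_pt_gp", 0);
 *	if (!tg_pt_gp)
 *		return -ENOMEM;
 *	if (core_alua_set_tg_pt_gp_id(tg_pt_gp, 0) < 0)
 *		core_alua_free_tg_pt_gp(tg_pt_gp);
 */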
1706void core_alua_free_tg_pt_gp(
1707        struct t10_alua_tg_pt_gp *tg_pt_gp)
1708{
1709        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1710        struct se_lun *lun, *next;
1711
1712        /*
1713         * Once we have reached this point, config_item_put() has already
1714         * been called from target_core_alua_drop_tg_pt_gp().
1715         *
1716         * Here we remove *tg_pt_gp from the global list so that
1717         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1718         * can be made while we are releasing struct t10_alua_tg_pt_gp.
1719         */
1720        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1721        if (tg_pt_gp->tg_pt_gp_valid_id) {
1722                list_del(&tg_pt_gp->tg_pt_gp_list);
1723                dev->t10_alua.alua_tg_pt_gps_count--;
1724        }
1725        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1726
1727        /*
1728         * Allow a struct t10_alua_tg_pt_gp * referenced by
1729         * core_alua_get_tg_pt_gp_by_name() in
1730         * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1731         * to be released with core_alua_put_tg_pt_gp_from_name().
1732         */
1733        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1734                cpu_relax();
1735
1736        /*
1737         * Release reference to struct t10_alua_tg_pt_gp from all associated
1738         * struct se_lun.
1739         */
1740        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1741        list_for_each_entry_safe(lun, next,
1742                        &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1743                list_del_init(&lun->lun_tg_pt_gp_link);
1744                tg_pt_gp->tg_pt_gp_members--;
1745
1746                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1747                /*
1748                 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1749                 * assume we want to re-associate a given tg_pt_gp_mem with
1750                 * default_tg_pt_gp.
1751                 */
1752                spin_lock(&lun->lun_tg_pt_gp_lock);
1753                if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1754                        __target_attach_tg_pt_gp(lun,
1755                                        dev->t10_alua.default_tg_pt_gp);
1756                } else
1757                        rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
1758                spin_unlock(&lun->lun_tg_pt_gp_lock);
1759
1760                spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1761        }
1762        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1763
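        /*
         * Readers dereference lun->lun_tg_pt_gp under rcu_read_lock()
         * (e.g. core_alua_show_tg_pt_gp_info() below); wait for them to
         * drain before the group is freed.
         */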
1764        synchronize_rcu();
1765        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1766}
1767
1768static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1769                struct se_device *dev, const char *name)
1770{
1771        struct t10_alua_tg_pt_gp *tg_pt_gp;
1772        struct config_item *ci;
1773
1774        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1775        list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1776                        tg_pt_gp_list) {
1777                if (!tg_pt_gp->tg_pt_gp_valid_id)
1778                        continue;
1779                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1780                if (!strcmp(config_item_name(ci), name)) {
1781                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1782                        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1783                        return tg_pt_gp;
1784                }
1785        }
1786        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1787
1788        return NULL;
1789}
1790
1791static void core_alua_put_tg_pt_gp_from_name(
1792        struct t10_alua_tg_pt_gp *tg_pt_gp)
1793{
1794        struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1795
1796        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1797        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1798        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1799}
1800
1801static void __target_attach_tg_pt_gp(struct se_lun *lun,
1802                struct t10_alua_tg_pt_gp *tg_pt_gp)
1803{
1804        struct se_dev_entry *se_deve;
1805
1806        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1807
1808        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1809        rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
1810        list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1811        tg_pt_gp->tg_pt_gp_members++;
1812        spin_lock(&lun->lun_deve_lock);
1813        list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1814                core_scsi3_ua_allocate(se_deve, 0x3f,
1815                                       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1816        spin_unlock(&lun->lun_deve_lock);
1817        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1818}
1819
1820void target_attach_tg_pt_gp(struct se_lun *lun,
1821                struct t10_alua_tg_pt_gp *tg_pt_gp)
1822{
1823        spin_lock(&lun->lun_tg_pt_gp_lock);
1824        __target_attach_tg_pt_gp(lun, tg_pt_gp);
1825        spin_unlock(&lun->lun_tg_pt_gp_lock);
1826        synchronize_rcu();
1827}
1828
1829static void __target_detach_tg_pt_gp(struct se_lun *lun,
1830                struct t10_alua_tg_pt_gp *tg_pt_gp)
1831{
1832        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1833
1834        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1835        list_del_init(&lun->lun_tg_pt_gp_link);
1836        tg_pt_gp->tg_pt_gp_members--;
1837        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1838}
1839
1840void target_detach_tg_pt_gp(struct se_lun *lun)
1841{
1842        struct t10_alua_tg_pt_gp *tg_pt_gp;
1843
1844        spin_lock(&lun->lun_tg_pt_gp_lock);
1845        tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
1846                                lockdep_is_held(&lun->lun_tg_pt_gp_lock));
1847        if (tg_pt_gp) {
1848                __target_detach_tg_pt_gp(lun, tg_pt_gp);
1849                rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
1850        }
1851        spin_unlock(&lun->lun_tg_pt_gp_lock);
1852        synchronize_rcu();
1853}
1854
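/*
 * Usage sketch (illustrative): fabric/configfs code attaches a LUN to the
 * device default group and detaches it again on teardown:
 *
 *	target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
 *	...
 *	target_detach_tg_pt_gp(lun);
 *
 * Both wrappers take lun_tg_pt_gp_lock themselves and call
 * synchronize_rcu() afterwards, so callers must not hold that lock.
 */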
1855static void target_swap_tg_pt_gp(struct se_lun *lun,
1856                                 struct t10_alua_tg_pt_gp *old_tg_pt_gp,
1857                                 struct t10_alua_tg_pt_gp *new_tg_pt_gp)
1858{
1859        assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1860
1861        if (old_tg_pt_gp)
1862                __target_detach_tg_pt_gp(lun, old_tg_pt_gp);
1863        __target_attach_tg_pt_gp(lun, new_tg_pt_gp);
1864}
1865
1866ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1867{
1868        struct config_item *tg_pt_ci;
1869        struct t10_alua_tg_pt_gp *tg_pt_gp;
1870        ssize_t len = 0;
1871
1872        rcu_read_lock();
1873        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
1874        if (tg_pt_gp) {
1875                tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1876                len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1877                        " %hu\nTG Port Primary Access State: %s\nTG Port "
1878                        "Primary Access Status: %s\nTG Port Secondary Access"
1879                        " State: %s\nTG Port Secondary Access Status: %s\n",
1880                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1881                        core_alua_dump_state(
1882                                tg_pt_gp->tg_pt_gp_alua_access_state),
1883                        core_alua_dump_status(
1884                                tg_pt_gp->tg_pt_gp_alua_access_status),
1885                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1886                        "Offline" : "None",
1887                        core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1888        }
1889        rcu_read_unlock();
1890
1891        return len;
1892}
1893
1894ssize_t core_alua_store_tg_pt_gp_info(
1895        struct se_lun *lun,
1896        const char *page,
1897        size_t count)
1898{
1899        struct se_portal_group *tpg = lun->lun_tpg;
1900        /*
1901         * rcu_dereference_raw protected by se_lun->lun_group symlink
1902         * reference to se_device->dev_group.
1903         */
1904        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1905        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1906        unsigned char buf[TG_PT_GROUP_NAME_BUF];
1907        int move = 0;
1908
1909        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1910            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1911                return -ENODEV;
1912
1913        if (count > TG_PT_GROUP_NAME_BUF) {
1914                pr_err("ALUA Target Port Group alias too large!\n");
1915                return -EINVAL;
1916        }
1917        memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1918        memcpy(buf, page, count);
1919        /*
1920         * Any ALUA target port group alias besides "NULL" means we will be
1921         * making a new group association.
1922         */
1923        if (strcmp(strstrip(buf), "NULL")) {
1924                /*
1925                 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1926                 * struct t10_alua_tg_pt_gp.  This reference is released with
1927                 * core_alua_put_tg_pt_gp_from_name() below.
1928                 */
1929                tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1930                                        strstrip(buf));
1931                if (!tg_pt_gp_new)
1932                        return -ENODEV;
1933        }
1934
1935        spin_lock(&lun->lun_tg_pt_gp_lock);
1936        tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
1937                                lockdep_is_held(&lun->lun_tg_pt_gp_lock));
1938        if (tg_pt_gp) {
1939                /*
1940                 * Clearing an existing tg_pt_gp association, and replacing
1941                 * with the default_tg_pt_gp.
1942                 */
1943                if (!tg_pt_gp_new) {
1944                        pr_debug("Target_Core_ConfigFS: Moving"
1945                                " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1946                                " alua/%s, ID: %hu back to"
1947                                " default_tg_pt_gp\n",
1948                                tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1949                                tpg->se_tpg_tfo->tpg_get_tag(tpg),
1950                                config_item_name(&lun->lun_group.cg_item),
1951                                config_item_name(
1952                                        &tg_pt_gp->tg_pt_gp_group.cg_item),
1953                                tg_pt_gp->tg_pt_gp_id);
1954
1955                        target_swap_tg_pt_gp(lun, tg_pt_gp,
1956                                        dev->t10_alua.default_tg_pt_gp);
1957                        spin_unlock(&lun->lun_tg_pt_gp_lock);
1958
1959                        goto sync_rcu;
1960                }
1961                move = 1;
1962        }
1963
1964        target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
1965        spin_unlock(&lun->lun_tg_pt_gp_lock);
1966        pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1967                " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1968                "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1969                tpg->se_tpg_tfo->tpg_get_tag(tpg),
1970                config_item_name(&lun->lun_group.cg_item),
1971                config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1972                tg_pt_gp_new->tg_pt_gp_id);
1973
1974        core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1975sync_rcu:
1976        synchronize_rcu();
1977        return count;
1978}
1979
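/*
 * From userspace these two handlers typically back a per-LUN configfs
 * attribute (paths illustrative; the fabric layout may differ):
 *
 *	cat .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *	echo some_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *	echo NULL > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 * where writing "NULL" moves the LUN back to default_tg_pt_gp, as handled
 * above.
 */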
1980ssize_t core_alua_show_access_type(
1981        struct t10_alua_tg_pt_gp *tg_pt_gp,
1982        char *page)
1983{
1984        if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
1985            (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
1986                return sprintf(page, "Implicit and Explicit\n");
1987        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
1988                return sprintf(page, "Implicit\n");
1989        else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
1990                return sprintf(page, "Explicit\n");
1991        else
1992                return sprintf(page, "None\n");
1993}
1994
1995ssize_t core_alua_store_access_type(
1996        struct t10_alua_tg_pt_gp *tg_pt_gp,
1997        const char *page,
1998        size_t count)
1999{
2000        unsigned long tmp;
2001        int ret;
2002
2003        ret = kstrtoul(page, 0, &tmp);
2004        if (ret < 0) {
2005                pr_err("Unable to extract alua_access_type\n");
2006                return ret;
2007        }
2008        if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2009                pr_err("Illegal value for alua_access_type:"
2010                                " %lu\n", tmp);
2011                return -EINVAL;
2012        }
2013        if (tmp == 3)
2014                tg_pt_gp->tg_pt_gp_alua_access_type =
2015                        TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2016        else if (tmp == 2)
2017                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2018        else if (tmp == 1)
2019                tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2020        else
2021                tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2022
2023        return count;
2024}
2025
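/*
 * The accepted values map onto the TPGS support bits:
 *
 *	0 - none
 *	1 - TPGS_IMPLICIT_ALUA
 *	2 - TPGS_EXPLICIT_ALUA
 *	3 - TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA
 *
 * e.g. (path illustrative) echo 3 > .../alua/some_tg_pt_gp/alua_access_type
 */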
2026ssize_t core_alua_show_nonop_delay_msecs(
2027        struct t10_alua_tg_pt_gp *tg_pt_gp,
2028        char *page)
2029{
2030        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2031}
2032
2033ssize_t core_alua_store_nonop_delay_msecs(
2034        struct t10_alua_tg_pt_gp *tg_pt_gp,
2035        const char *page,
2036        size_t count)
2037{
2038        unsigned long tmp;
2039        int ret;
2040
2041        ret = kstrtoul(page, 0, &tmp);
2042        if (ret < 0) {
2043                pr_err("Unable to extract nonop_delay_msecs\n");
2044                return ret;
2045        }
2046        if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2047                pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2048                        " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2049                        ALUA_MAX_NONOP_DELAY_MSECS);
2050                return -EINVAL;
2051        }
2052        tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2053
2054        return count;
2055}
2056
2057ssize_t core_alua_show_trans_delay_msecs(
2058        struct t10_alua_tg_pt_gp *tg_pt_gp,
2059        char *page)
2060{
2061        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2062}
2063
2064ssize_t core_alua_store_trans_delay_msecs(
2065        struct t10_alua_tg_pt_gp *tg_pt_gp,
2066        const char *page,
2067        size_t count)
2068{
2069        unsigned long tmp;
2070        int ret;
2071
2072        ret = kstrtoul(page, 0, &tmp);
2073        if (ret < 0) {
2074                pr_err("Unable to extract trans_delay_msecs\n");
2075                return ret;
2076        }
2077        if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2078                pr_err("Passed trans_delay_msecs: %lu, exceeds"
2079                        " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2080                        ALUA_MAX_TRANS_DELAY_MSECS);
2081                return -EINVAL;
2082        }
2083        tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2084
2085        return count;
2086}
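/*
 * Summary of behaviour implemented earlier in this file: nonop_delay_msecs
 * delays commands completed through an ACTIVE/NON-OPTIMIZED port group,
 * while trans_delay_msecs is applied while a primary access state
 * transition is in flight; both are clamped by their ALUA_MAX_* limits
 * above.
 */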
2087
2088ssize_t core_alua_show_implicit_trans_secs(
2089        struct t10_alua_tg_pt_gp *tg_pt_gp,
2090        char *page)
2091{
2092        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2093}
2094
2095ssize_t core_alua_store_implicit_trans_secs(
2096        struct t10_alua_tg_pt_gp *tg_pt_gp,
2097        const char *page,
2098        size_t count)
2099{
2100        unsigned long tmp;
2101        int ret;
2102
2103        ret = kstrtoul(page, 0, &tmp);
2104        if (ret < 0) {
2105                pr_err("Unable to extract implicit_trans_secs\n");
2106                return ret;
2107        }
2108        if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2109                pr_err("Passed implicit_trans_secs: %lu, exceeds"
2110                        " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2111                        ALUA_MAX_IMPLICIT_TRANS_SECS);
2112                return -EINVAL;
2113        }
2114        tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2115
2116        return count;
2117}
2118
2119ssize_t core_alua_show_preferred_bit(
2120        struct t10_alua_tg_pt_gp *tg_pt_gp,
2121        char *page)
2122{
2123        return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2124}
2125
2126ssize_t core_alua_store_preferred_bit(
2127        struct t10_alua_tg_pt_gp *tg_pt_gp,
2128        const char *page,
2129        size_t count)
2130{
2131        unsigned long tmp;
2132        int ret;
2133
2134        ret = kstrtoul(page, 0, &tmp);
2135        if (ret < 0) {
2136                pr_err("Unable to extract preferred ALUA value\n");
2137                return ret;
2138        }
2139        if ((tmp != 0) && (tmp != 1)) {
2140                pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2141                return -EINVAL;
2142        }
2143        tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2144
2145        return count;
2146}
2147
2148ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2149{
2150        return sprintf(page, "%d\n",
2151                atomic_read(&lun->lun_tg_pt_secondary_offline));
2152}
2153
2154ssize_t core_alua_store_offline_bit(
2155        struct se_lun *lun,
2156        const char *page,
2157        size_t count)
2158{
2159        /*
2160         * rcu_dereference_raw protected by se_lun->lun_group symlink
2161         * reference to se_device->dev_group.
2162         */
2163        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2164        unsigned long tmp;
2165        int ret;
2166
2167        if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2168            (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2169                return -ENODEV;
2170
2171        ret = kstrtoul(page, 0, &tmp);
2172        if (ret < 0) {
2173                pr_err("Unable to extract alua_tg_pt_offline value\n");
2174                return ret;
2175        }
2176        if ((tmp != 0) && (tmp != 1)) {
2177                pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2178                                tmp);
2179                return -EINVAL;
2180        }
2181
2182        ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2183        if (ret < 0)
2184                return -EINVAL;
2185
2186        return count;
2187}
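/*
 * Usage sketch (illustrative paths): toggling the secondary (per-port)
 * state from userspace routes through core_alua_set_tg_pt_secondary_state()
 * with explicit = 0:
 *
 *	echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 *	echo 0 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */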
2188
2189ssize_t core_alua_show_secondary_status(
2190        struct se_lun *lun,
2191        char *page)
2192{
2193        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2194}
2195
2196ssize_t core_alua_store_secondary_status(
2197        struct se_lun *lun,
2198        const char *page,
2199        size_t count)
2200{
2201        unsigned long tmp;
2202        int ret;
2203
2204        ret = kstrtoul(page, 0, &tmp);
2205        if (ret < 0) {
2206                pr_err("Unable to extract alua_tg_pt_status\n");
2207                return ret;
2208        }
2209        if ((tmp != ALUA_STATUS_NONE) &&
2210            (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2211            (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2212                pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2213                                tmp);
2214                return -EINVAL;
2215        }
2216        lun->lun_tg_pt_secondary_stat = (int)tmp;
2217
2218        return count;
2219}
2220
2221ssize_t core_alua_show_secondary_write_metadata(
2222        struct se_lun *lun,
2223        char *page)
2224{
2225        return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2226}
2227
2228ssize_t core_alua_store_secondary_write_metadata(
2229        struct se_lun *lun,
2230        const char *page,
2231        size_t count)
2232{
2233        unsigned long tmp;
2234        int ret;
2235
2236        ret = kstrtoul(page, 0, &tmp);
2237        if (ret < 0) {
2238                pr_err("Unable to extract alua_tg_pt_write_md\n");
2239                return ret;
2240        }
2241        if ((tmp != 0) && (tmp != 1)) {
2242                pr_err("Illegal value for alua_tg_pt_write_md:"
2243                                " %lu\n", tmp);
2244                return -EINVAL;
2245        }
2246        lun->lun_tg_pt_secondary_write_md = (int)tmp;
2247
2248        return count;
2249}
2250
2251int core_setup_alua(struct se_device *dev)
2252{
2253        if (!(dev->transport_flags &
2254             TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2255            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2256                struct t10_alua_lu_gp_member *lu_gp_mem;
2257
2258                /*
2259                 * Associate this struct se_device with the default ALUA
2260                 * LUN Group.
2261                 */
2262                lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2263                if (IS_ERR(lu_gp_mem))
2264                        return PTR_ERR(lu_gp_mem);
2265
2266                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2267                __core_alua_attach_lu_gp_mem(lu_gp_mem,
2268                                default_lu_gp);
2269                spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2270
2271                pr_debug("%s: Adding to default ALUA LU Group:"
2272                        " core/alua/lu_gps/default_lu_gp\n",
2273                        dev->transport->name);
2274        }
2275
2276        return 0;
2277}
2278
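/*
 * core_setup_alua() runs while a struct se_device is being configured
 * (typically from the device configuration path in target_core_device.c);
 * passthrough-ALUA backends and internal-use HBAs skip the default LU
 * group association entirely.
 */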