linux/drivers/target/target_core_tmr.c
/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

int core_tmr_alloc_req(
        struct se_cmd *se_cmd,
        void *fabric_tmr_ptr,
        u8 function,
        gfp_t gfp_flags)
{
        struct se_tmr_req *tmr;

        tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
        if (!tmr) {
                pr_err("Unable to allocate struct se_tmr_req\n");
                return -ENOMEM;
        }

        se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
        se_cmd->se_tmr_req = tmr;
        tmr->task_cmd = se_cmd;
        tmr->fabric_tmr_ptr = fabric_tmr_ptr;
        tmr->function = function;
        INIT_LIST_HEAD(&tmr->tmr_list);

        return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
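
/*
 * Illustrative sketch only, not part of this driver: fabric modules normally
 * do not call core_tmr_alloc_req() directly, but reach it through
 * target_submit_tmr(), which allocates the se_tmr_req before queueing the
 * TMR for execution.  The local names below (ioctx, sess, unpacked_lun,
 * ref_task_tag) are hypothetical:
 *
 *      rc = target_submit_tmr(&ioctx->se_cmd, sess->se_sess, NULL,
 *                             unpacked_lun, NULL, TMR_ABORT_TASK,
 *                             GFP_KERNEL, ref_task_tag, 0);
 *      if (rc < 0)
 *              (fail the TMR back to the initiator)
 */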

void core_tmr_release_req(struct se_tmr_req *tmr)
{
        struct se_device *dev = tmr->tmr_dev;
        unsigned long flags;

        if (dev) {
                spin_lock_irqsave(&dev->se_tmr_lock, flags);
                list_del_init(&tmr->tmr_list);
                spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
        }

        kfree(tmr);
}

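/*
 * Finish an aborted command, honouring TASK ABORTED status (TAS): if
 * __target_check_io_state() marked the command CMD_T_TAS (aborted by a
 * different I_T nexus with TAS enabled), send TASK ABORTED status back to
 * the initiator and skip the final put here, since the queued status
 * completion will release the command; otherwise finish the abort directly.
 */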
static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
        unsigned long flags;
        bool remove = true, send_tas;
        /*
         * TASK ABORTED status (TAS) bit support
         */
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        send_tas = (cmd->transport_state & CMD_T_TAS);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (send_tas) {
                remove = false;
                transport_send_task_abort(cmd);
        }

        transport_cmd_finish_abort(cmd, remove);
}

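/*
 * Returns 0 when @cmd should be considered for abort: either no
 * PREEMPT_AND_ABORT list was supplied, or the command's reservation key
 * matches one of the preempted registrations.  Returns 1 when the command
 * must be skipped.
 */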
static int target_check_cdb_and_preempt(struct list_head *list,
                struct se_cmd *cmd)
{
        struct t10_pr_registration *reg;

        if (!list)
                return 0;
        list_for_each_entry(reg, list, pr_reg_abort_list) {
                if (reg->pr_res_key == cmd->pr_res_key)
                        return 0;
        }

        return 1;
}

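/*
 * Returns true, with CMD_T_ABORTED set and a reference taken on
 * se_cmd->cmd_kref, when the command may still be aborted; returns false if
 * it has already completed, is being stopped by fabric shutdown, or its
 * kref has already dropped to zero.  CMD_T_TAS is set when the abort comes
 * from a different I_T nexus than the command and TAS is enabled.
 * Called with sess->sess_cmd_lock held and interrupts disabled.
 */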
static bool __target_check_io_state(struct se_cmd *se_cmd,
                                    struct se_session *tmr_sess, int tas)
{
        struct se_session *sess = se_cmd->se_sess;

        assert_spin_locked(&sess->sess_cmd_lock);
        WARN_ON_ONCE(!irqs_disabled());
        /*
         * If the command has already reached CMD_T_COMPLETE state within
         * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
         * this se_cmd has been passed to the fabric driver and will
         * not be aborted.
         *
         * Otherwise, take a local reference on se_cmd->cmd_kref now for
         * TMR ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, as long as
         * se_cmd->cmd_kref has not already dropped to zero.
         */
        spin_lock(&se_cmd->t_state_lock);
        if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
                pr_debug("Attempted to abort io tag: %llu already complete or"
                        " fabric stop, skipping\n", se_cmd->tag);
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
        if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
                pr_debug("Attempted to abort io tag: %llu already shutdown,"
                        " skipping\n", se_cmd->tag);
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
        se_cmd->transport_state |= CMD_T_ABORTED;

        if ((tmr_sess != se_cmd->se_sess) && tas)
                se_cmd->transport_state |= CMD_T_TAS;

        spin_unlock(&se_cmd->t_state_lock);

        return kref_get_unless_zero(&se_cmd->cmd_kref);
}

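/*
 * Handle a TMR ABORT_TASK request: locate the command on @se_sess whose tag
 * matches tmr->ref_task_tag, abort it, and set tmr->response to
 * TMR_FUNCTION_COMPLETE on success or TMR_TASK_DOES_NOT_EXIST if no such
 * command is found or it can no longer be aborted.
 */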
void core_tmr_abort_task(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct se_session *se_sess)
{
        struct se_cmd *se_cmd;
        unsigned long flags;
        u64 ref_tag;

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {

                if (dev != se_cmd->se_dev)
                        continue;

                /* skip task management functions, including tmr->task_cmd */
                if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                        continue;

                ref_tag = se_cmd->tag;
                if (tmr->ref_task_tag != ref_tag)
                        continue;

                printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
                        se_cmd->se_tfo->get_fabric_name(), ref_tag);

                if (!__target_check_io_state(se_cmd, se_sess, 0)) {
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                        goto out;
                }
                list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);

                transport_cmd_finish_abort(se_cmd, true);
                target_put_sess_cmd(se_cmd);

                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                                " ref_tag: %llu\n", ref_tag);
                tmr->response = TMR_FUNCTION_COMPLETE;
                return;
        }
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

out:
        printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %llu\n",
                        tmr->ref_task_tag);
        tmr->response = TMR_TASK_DOES_NOT_EXIST;
}

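/*
 * Abort every other TMR queued on @dev->dev_tmr_list for LUN_RESET, or, for
 * PREEMPT_AND_ABORT, only those whose reservation key is on
 * @preempt_and_abort_list.  The received TMR itself is left alone so it can
 * return FUNCTION_COMPLETE.
 */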
static void core_tmr_drain_tmr_list(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list)
{
        LIST_HEAD(drain_tmr_list);
        struct se_session *sess;
        struct se_tmr_req *tmr_p, *tmr_pp;
        struct se_cmd *cmd;
        unsigned long flags;
        bool rc;
        /*
         * Release all pending and outgoing TMRs aside from the received
         * LUN_RESET tmr.
         */
        spin_lock_irqsave(&dev->se_tmr_lock, flags);
        list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
                /*
                 * Allow the received TMR to return with FUNCTION_COMPLETE.
                 */
                if (tmr_p == tmr)
                        continue;

                cmd = tmr_p->task_cmd;
                if (!cmd) {
                        pr_err("Unable to locate struct se_cmd for TMR\n");
                        continue;
                }
                /*
                 * If this function was called with a valid pr_res_key
                 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service
                 * action), skip TMRs whose registration key does not match.
                 */
                if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;

                sess = cmd->se_sess;
                if (WARN_ON_ONCE(!sess))
                        continue;

                spin_lock(&sess->sess_cmd_lock);
                spin_lock(&cmd->t_state_lock);
                if (!(cmd->transport_state & CMD_T_ACTIVE) ||
                     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                if (sess->sess_tearing_down || cmd->cmd_wait_set) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                cmd->transport_state |= CMD_T_ABORTED;
                spin_unlock(&cmd->t_state_lock);

                rc = kref_get_unless_zero(&cmd->cmd_kref);
                if (!rc) {
                        printk("LUN_RESET TMR: kref_get_unless_zero failed for TMR task_cmd\n");
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                spin_unlock(&sess->sess_cmd_lock);

                list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
        }
        spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

        list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
                list_del_init(&tmr_p->tmr_list);
                cmd = tmr_p->task_cmd;

                pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
                        " Response: 0x%02x, t_state: %d\n",
                        (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
                        tmr_p->function, tmr_p->response, cmd->t_state);

                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);

                transport_cmd_finish_abort(cmd, 1);
                target_put_sess_cmd(cmd);
        }
}

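/*
 * Abort the commands outstanding on @dev's state_list: all of them for
 * LUN_RESET, or only those matching a preempted reservation key for
 * PREEMPT_AND_ABORT, completing them with TASK ABORTED status where the
 * SAM-4 rules quoted below require it.  @prout_cmd, the PR OUT command that
 * triggered a PREEMPT_AND_ABORT, is never aborted.
 */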
static void core_tmr_drain_state_list(
        struct se_device *dev,
        struct se_cmd *prout_cmd,
        struct se_session *tmr_sess,
        int tas,
        struct list_head *preempt_and_abort_list)
{
        LIST_HEAD(drain_task_list);
        struct se_session *sess;
        struct se_cmd *cmd, *next;
        unsigned long flags;
        int rc;

        /*
         * Complete outstanding commands with TASK_ABORTED SAM status.
         *
         * This follows sam4r17, section 5.6 Aborting commands, Table 38
         * for TMR LUN_RESET:
         *
         * a) "Yes" indicates that each command that is aborted on an I_T nexus
         * other than the one that caused the SCSI device condition is
         * completed with TASK ABORTED status, if the TAS bit is set to one in
         * the Control mode page (see SPC-4). "No" indicates that no status is
         * returned for aborted commands.
         *
         * d) If the logical unit reset is caused by a particular I_T nexus
         * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
         * (TASK_ABORTED status) applies.
         *
         * Otherwise (e.g., if triggered by a hard reset), "no"
         * (no TASK_ABORTED SAM status) applies.
         *
         * Note that this seems to be independent of TAS (Task Aborted Status)
         * in the Control Mode Page.
         */
        spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
                /*
                 * For PREEMPT_AND_ABORT usage, only process commands
                 * with a matching reservation key.
                 */
                if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;

                /*
                 * Not aborting the PROUT PREEMPT_AND_ABORT CDB itself.
                 */
                if (prout_cmd == cmd)
                        continue;

                sess = cmd->se_sess;
                if (WARN_ON_ONCE(!sess))
                        continue;

                spin_lock(&sess->sess_cmd_lock);
                rc = __target_check_io_state(cmd, tmr_sess, tas);
                spin_unlock(&sess->sess_cmd_lock);
                if (!rc)
                        continue;

                list_move_tail(&cmd->state_list, &drain_task_list);
                cmd->state_active = false;
        }
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);

        while (!list_empty(&drain_task_list)) {
                cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
                list_del_init(&cmd->state_list);

                pr_debug("LUN_RESET: %s cmd: %p"
                        " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
                        " cdb: 0x%02x\n",
                        (preempt_and_abort_list) ? "Preempt" : "", cmd,
                        cmd->tag, 0,
                        cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
                        cmd->t_task_cdb[0]);
                pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
                        " -- CMD_T_ACTIVE: %d"
                        " CMD_T_STOP: %d CMD_T_SENT: %d\n",
                        cmd->tag, cmd->pr_res_key,
                        (cmd->transport_state & CMD_T_ACTIVE) != 0,
                        (cmd->transport_state & CMD_T_STOP) != 0,
                        (cmd->transport_state & CMD_T_SENT) != 0);

                /*
                 * If the command may be queued onto a workqueue, cancel it now.
                 *
                 * This is equivalent to removal from the execute queue in the
                 * loop above, but we do it down here given that
                 * cancel_work_sync may block.
                 */
                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);

                core_tmr_handle_tas_abort(cmd, tas);
                target_put_sess_cmd(cmd);
        }
}

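/*
 * core_tmr_lun_reset - abort all commands and TMRs outstanding on @dev.
 *
 * @tmr is the LUN_RESET descriptor (NULL when invoked for PROUT
 * PREEMPT_AND_ABORT), @preempt_and_abort_list carries the preempted
 * registrations for PREEMPT_AND_ABORT (NULL for a plain LUN_RESET), and
 * @prout_cmd is the PR OUT command itself, which must not be aborted.
 *
 * Drains the device TMR list and the device state list, then, for a plain
 * LUN_RESET, releases any legacy SPC-2 reservation.  Always returns 0.
 * For a fabric-initiated LUN_RESET this is normally reached via
 * target_tmr_work() with both @preempt_and_abort_list and @prout_cmd NULL.
 */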
int core_tmr_lun_reset(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list,
        struct se_cmd *prout_cmd)
{
        struct se_node_acl *tmr_nacl = NULL;
        struct se_portal_group *tmr_tpg = NULL;
        struct se_session *tmr_sess = NULL;
        int tas;
        /*
         * TASK_ABORTED status bit; this is configurable via ConfigFS
         * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
         *
         * A task aborted status (TAS) bit set to zero specifies that aborted
         * tasks shall be terminated by the device server without any response
         * to the application client. A TAS bit set to one specifies that tasks
         * aborted by the actions of an I_T nexus other than the I_T nexus on
         * which the command was received shall be completed with TASK ABORTED
         * status (see SAM-4).
         */
        tas = dev->dev_attrib.emulate_tas;
        /*
         * Determine if this se_tmr is coming from a $FABRIC_MOD
         * or struct se_device passthrough.
         */
        if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
                tmr_sess = tmr->task_cmd->se_sess;
                tmr_nacl = tmr_sess->se_node_acl;
                tmr_tpg = tmr_sess->se_tpg;
                if (tmr_nacl && tmr_tpg) {
                        pr_debug("LUN_RESET: TMR caller fabric: %s"
                                " initiator port %s\n",
                                tmr_tpg->se_tpg_tfo->get_fabric_name(),
                                tmr_nacl->initiatorname);
                }
        }
        pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
                (preempt_and_abort_list) ? "Preempt" : "TMR",
                dev->transport->name, tas);

        core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
        core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
                                preempt_and_abort_list);

        /*
         * Clear any legacy SPC-2 reservation when called during
         * LOGICAL UNIT RESET.
         */
        if (!preempt_and_abort_list &&
             (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
                spin_lock(&dev->dev_reservation_lock);
                dev->dev_reserved_node_acl = NULL;
                dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
                spin_unlock(&dev->dev_reservation_lock);
                pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
        }

        atomic_long_inc(&dev->num_resets);

        pr_debug("LUN_RESET: %s for [%s] Complete\n",
                        (preempt_and_abort_list) ? "Preempt" : "TMR",
                        dev->transport->name);
        return 0;
}
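
/*
 * Note on the emulate_tas attribute used above: TAS emulation is a
 * per-device configfs attribute.  An illustrative way to enable it, with
 * hypothetical HBA and device names, would be:
 *
 *      echo 1 > /sys/kernel/config/target/core/iblock_0/my_dev/attrib/emulate_tas
 */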