linux/drivers/target/target_core_tmr.c
/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

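/*
 * Allocate a struct se_tmr_req and attach it to @se_cmd.  Fabric drivers
 * typically reach this via target_submit_tmr() before a task management
 * request enters the core.  @function is one of the TMR_* task management
 * function codes and @fabric_tmr_ptr is an opaque cookie owned by the
 * fabric driver.  Returns 0 on success or -ENOMEM on allocation failure.
 */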
int core_tmr_alloc_req(
        struct se_cmd *se_cmd,
        void *fabric_tmr_ptr,
        u8 function,
        gfp_t gfp_flags)
{
        struct se_tmr_req *tmr;

        tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
        if (!tmr) {
                pr_err("Unable to allocate struct se_tmr_req\n");
                return -ENOMEM;
        }

        se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
        se_cmd->se_tmr_req = tmr;
        tmr->task_cmd = se_cmd;
        tmr->fabric_tmr_ptr = fabric_tmr_ptr;
        tmr->function = function;
        INIT_LIST_HEAD(&tmr->tmr_list);

        return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

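/*
 * Release a previously allocated se_tmr_req, first unlinking it from its
 * device's dev_tmr_list (if it was ever added) under se_tmr_lock.
 */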
void core_tmr_release_req(struct se_tmr_req *tmr)
{
        struct se_device *dev = tmr->tmr_dev;
        unsigned long flags;

        if (dev) {
                spin_lock_irqsave(&dev->se_tmr_lock, flags);
                list_del_init(&tmr->tmr_list);
                spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
        }

        kfree(tmr);
}

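/*
 * Finish an aborted command.  If __target_check_io_state() marked it
 * CMD_T_TAS (the abort originated from a different I_T nexus with TAS
 * enabled), send TASK ABORTED status back to the initiator first and
 * leave the final removal to that completion path (remove == false).
 */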
static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
        unsigned long flags;
        bool remove = true, send_tas;
        /*
         * TASK ABORTED status (TAS) bit support
         */
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        send_tas = (cmd->transport_state & CMD_T_TAS);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (send_tas) {
                remove = false;
                transport_send_task_abort(cmd);
        }

        return transport_cmd_finish_abort(cmd, remove);
}

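/*
 * For PREEMPT_AND_ABORT, @list holds the t10_pr_registration entries being
 * preempted.  Returns 0 when @cmd should be processed (no list was passed,
 * or its reservation key matches a preempted registration), and 1 when the
 * command must be left alone.
 */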
static int target_check_cdb_and_preempt(struct list_head *list,
                struct se_cmd *cmd)
{
        struct t10_pr_registration *reg;

        if (!list)
                return 0;
        list_for_each_entry(reg, list, pr_reg_abort_list) {
                if (reg->pr_res_key == cmd->pr_res_key)
                        return 0;
        }

        return 1;
}

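/*
 * Decide whether @se_cmd can still be aborted.  Must be called with
 * sess->sess_cmd_lock held and interrupts disabled.  On success the command
 * is marked CMD_T_ABORTED (plus CMD_T_TAS when the abort comes from a
 * different session with TAS enabled) and a reference is taken via
 * kref_get_unless_zero().  Returns false if the command has already
 * completed, is being shut down, or its kref has dropped to zero.
 */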
static bool __target_check_io_state(struct se_cmd *se_cmd,
                                    struct se_session *tmr_sess, int tas)
{
        struct se_session *sess = se_cmd->se_sess;

        assert_spin_locked(&sess->sess_cmd_lock);
        WARN_ON_ONCE(!irqs_disabled());
        /*
         * If the command has already reached CMD_T_COMPLETE state within
         * target_complete_cmd(), or CMD_T_FABRIC_STOP due to shutdown,
         * this se_cmd has been passed to the fabric driver and will
         * not be aborted.
         *
         * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
         * ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, as long as
         * se_cmd->cmd_kref has not already dropped to zero.
         */
        spin_lock(&se_cmd->t_state_lock);
        if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
                pr_debug("Attempted to abort io tag: %llu already complete or"
                        " fabric stop, skipping\n", se_cmd->tag);
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
        if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
                if (se_cmd->scsi_status) {
                        pr_debug("Attempted to abort io tag: %llu early failure"
                                 " status: 0x%02x\n", se_cmd->tag,
                                 se_cmd->scsi_status);
                        spin_unlock(&se_cmd->t_state_lock);
                        return false;
                }
        }
        if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
                pr_debug("Attempted to abort io tag: %llu already shutdown,"
                        " skipping\n", se_cmd->tag);
                spin_unlock(&se_cmd->t_state_lock);
                return false;
        }
        se_cmd->transport_state |= CMD_T_ABORTED;

        if ((tmr_sess != se_cmd->se_sess) && tas)
                se_cmd->transport_state |= CMD_T_TAS;

        spin_unlock(&se_cmd->t_state_lock);

        return kref_get_unless_zero(&se_cmd->cmd_kref);
}

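/*
 * Handle an ABORT_TASK TMR: scan the session command list for the command
 * whose tag matches tmr->ref_task_tag, mark it aborted via
 * __target_check_io_state(), wait for it to quiesce, and report
 * TMR_FUNCTION_COMPLETE.  If no matching command is found, the response is
 * TMR_TASK_DOES_NOT_EXIST.
 */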
void core_tmr_abort_task(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct se_session *se_sess)
{
        struct se_cmd *se_cmd;
        unsigned long flags;
        u64 ref_tag;

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {

                if (dev != se_cmd->se_dev)
                        continue;

                /* skip task management functions, including tmr->task_cmd */
                if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                        continue;

                ref_tag = se_cmd->tag;
                if (tmr->ref_task_tag != ref_tag)
                        continue;

                printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
                        se_cmd->se_tfo->get_fabric_name(), ref_tag);

                if (!__target_check_io_state(se_cmd, se_sess, 0))
                        continue;

                list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);

                if (!transport_cmd_finish_abort(se_cmd, true))
                        target_put_sess_cmd(se_cmd);

                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                                " ref_tag: %llu\n", ref_tag);
                tmr->response = TMR_FUNCTION_COMPLETE;
                atomic_long_inc(&dev->aborts_complete);
                return;
        }
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

        printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %llu\n",
                        tmr->ref_task_tag);
        tmr->response = TMR_TASK_DOES_NOT_EXIST;
        atomic_long_inc(&dev->aborts_no_task);
}

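/*
 * LUN_RESET step one: walk dev->dev_tmr_list and abort every other
 * outstanding TMR (optionally filtered by the PREEMPT_AND_ABORT
 * registration list), then drain the aborted TMRs outside of se_tmr_lock
 * so that cancel_work_sync() is free to sleep.
 */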
static void core_tmr_drain_tmr_list(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list)
{
        LIST_HEAD(drain_tmr_list);
        struct se_session *sess;
        struct se_tmr_req *tmr_p, *tmr_pp;
        struct se_cmd *cmd;
        unsigned long flags;
        bool rc;
        /*
         * Release all pending and outgoing TMRs aside from the received
         * LUN_RESET tmr.
         */
        spin_lock_irqsave(&dev->se_tmr_lock, flags);
        if (tmr)
                list_del_init(&tmr->tmr_list);
        list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
                cmd = tmr_p->task_cmd;
                if (!cmd) {
                        pr_err("Unable to locate struct se_cmd for TMR\n");
                        continue;
                }
                /*
                 * If this function was called with a valid pr_res_key
                 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service
                 * action), skip TMRs whose registration key does not match.
                 */
                if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;

                sess = cmd->se_sess;
                if (WARN_ON_ONCE(!sess))
                        continue;

                spin_lock(&sess->sess_cmd_lock);
                spin_lock(&cmd->t_state_lock);
                if (!(cmd->transport_state & CMD_T_ACTIVE) ||
                     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                if (sess->sess_tearing_down || cmd->cmd_wait_set) {
                        spin_unlock(&cmd->t_state_lock);
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                cmd->transport_state |= CMD_T_ABORTED;
                spin_unlock(&cmd->t_state_lock);

                rc = kref_get_unless_zero(&cmd->cmd_kref);
                if (!rc) {
                        printk("LUN_RESET TMR: kref_get_unless_zero failed, skipping\n");
                        spin_unlock(&sess->sess_cmd_lock);
                        continue;
                }
                spin_unlock(&sess->sess_cmd_lock);

                list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
        }
        spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

        list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
                list_del_init(&tmr_p->tmr_list);
                cmd = tmr_p->task_cmd;

                pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
                        " Response: 0x%02x, t_state: %d\n",
                        (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
                        tmr_p->function, tmr_p->response, cmd->t_state);

                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);

                if (!transport_cmd_finish_abort(cmd, 1))
                        target_put_sess_cmd(cmd);
        }
}

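/*
 * LUN_RESET step two: move every outstanding command on dev->state_list
 * that passes __target_check_io_state() onto a local drain list, then wait
 * for each one and complete it with TASK ABORTED status where the SAM-4
 * rules described in the comment below require it.
 */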
static void core_tmr_drain_state_list(
        struct se_device *dev,
        struct se_cmd *prout_cmd,
        struct se_session *tmr_sess,
        int tas,
        struct list_head *preempt_and_abort_list)
{
        LIST_HEAD(drain_task_list);
        struct se_session *sess;
        struct se_cmd *cmd, *next;
        unsigned long flags;
        int rc;

        /*
         * Complete outstanding commands with TASK_ABORTED SAM status.
         *
         * This follows sam4r17, section 5.6 Aborting commands, Table 38
         * for TMR LUN_RESET:
         *
         * a) "Yes" indicates that each command that is aborted on an I_T nexus
         * other than the one that caused the SCSI device condition is
         * completed with TASK ABORTED status, if the TAS bit is set to one in
         * the Control mode page (see SPC-4). "No" indicates that no status is
         * returned for aborted commands.
         *
         * d) If the logical unit reset is caused by a particular I_T nexus
         * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
         * (TASK_ABORTED status) applies.
         *
         * Otherwise (e.g., if triggered by a hard reset), "no"
         * (no TASK_ABORTED SAM status) applies.
         *
         * Note that this seems to be independent of TAS (Task Aborted Status)
         * in the Control Mode Page.
         */
        spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
                /*
                 * For PREEMPT_AND_ABORT usage, only process commands
                 * with a matching reservation key.
                 */
                if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;

                /*
                 * Do not abort the PROUT PREEMPT_AND_ABORT CDB itself.
                 */
                if (prout_cmd == cmd)
                        continue;

                sess = cmd->se_sess;
                if (WARN_ON_ONCE(!sess))
                        continue;

                spin_lock(&sess->sess_cmd_lock);
                rc = __target_check_io_state(cmd, tmr_sess, tas);
                spin_unlock(&sess->sess_cmd_lock);
                if (!rc)
                        continue;

                list_move_tail(&cmd->state_list, &drain_task_list);
                cmd->state_active = false;
        }
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);

        while (!list_empty(&drain_task_list)) {
                cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
                list_del_init(&cmd->state_list);

                target_show_cmd("LUN_RESET: ", cmd);
                pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
                         cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
                         cmd->pr_res_key);

                /*
                 * If the command may be queued onto a workqueue, cancel it now.
                 *
                 * This is equivalent to removal from the execute queue in the
                 * loop above, but we do it down here given that
                 * cancel_work_sync may block.
                 */
                cancel_work_sync(&cmd->work);
                transport_wait_for_tasks(cmd);

                if (!core_tmr_handle_tas_abort(cmd, tas))
                        target_put_sess_cmd(cmd);
        }
}

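/*
 * Entry point for LUN_RESET and for PROUT PREEMPT_AND_ABORT (in which case
 * @preempt_and_abort_list carries the preempted registrations and
 * @prout_cmd is the PERSISTENT RESERVE OUT command itself, which must not
 * be aborted).  Drains the TMR and state lists and, for a plain LUN_RESET,
 * also drops any legacy SPC-2 reservation held on @dev.  Always returns 0.
 */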
int core_tmr_lun_reset(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list,
        struct se_cmd *prout_cmd)
{
        struct se_node_acl *tmr_nacl = NULL;
        struct se_portal_group *tmr_tpg = NULL;
        struct se_session *tmr_sess = NULL;
        int tas;
        /*
         * TASK_ABORTED status bit, this is configurable via ConfigFS
         * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
         *
         * A task aborted status (TAS) bit set to zero specifies that aborted
         * tasks shall be terminated by the device server without any response
         * to the application client. A TAS bit set to one specifies that tasks
         * aborted by the actions of an I_T nexus other than the I_T nexus on
         * which the command was received shall be completed with TASK ABORTED
         * status (see SAM-4).
         */
        tas = dev->dev_attrib.emulate_tas;
        /*
         * Determine if this se_tmr is coming from a $FABRIC_MOD
         * or struct se_device passthrough.
         */
        if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
                tmr_sess = tmr->task_cmd->se_sess;
                tmr_nacl = tmr_sess->se_node_acl;
                tmr_tpg = tmr_sess->se_tpg;
                if (tmr_nacl && tmr_tpg) {
                        pr_debug("LUN_RESET: TMR caller fabric: %s"
                                " initiator port %s\n",
                                tmr_tpg->se_tpg_tfo->get_fabric_name(),
                                tmr_nacl->initiatorname);
                }
        }
        pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
                (preempt_and_abort_list) ? "Preempt" : "TMR",
                dev->transport->name, tas);

        core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
        core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
                                preempt_and_abort_list);

        /*
         * Clear any legacy SPC-2 reservation when called during
         * LOGICAL UNIT RESET.
         */
        if (!preempt_and_abort_list &&
             (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
                spin_lock(&dev->dev_reservation_lock);
                dev->dev_reserved_node_acl = NULL;
                dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
                spin_unlock(&dev->dev_reservation_lock);
                pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
        }

        atomic_long_inc(&dev->num_resets);

        pr_debug("LUN_RESET: %s for [%s] Complete\n",
                        (preempt_and_abort_list) ? "Preempt" : "TMR",
                        dev->transport->name);
        return 0;
}