linux/drivers/scsi/qla2xxx/qla_target.c
<<
>>
Prefs
   1/*
   2 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
   3 *
   4 *  based on qla2x00t.c code:
   5 *
   6 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
   7 *  Copyright (C) 2004 - 2005 Leonid Stoljar
   8 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
   9 *  Copyright (C) 2006 - 2010 ID7 Ltd.
  10 *
  11 *  Forward port and refactoring to modern qla2xxx and target/configfs
  12 *
  13 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation, version 2
  18 *  of the License.
  19 *
  20 *  This program is distributed in the hope that it will be useful,
  21 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  22 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23 *  GNU General Public License for more details.
  24 */
  25
  26#include <linux/module.h>
  27#include <linux/init.h>
  28#include <linux/types.h>
  29#include <linux/blkdev.h>
  30#include <linux/interrupt.h>
  31#include <linux/pci.h>
  32#include <linux/delay.h>
  33#include <linux/list.h>
  34#include <linux/workqueue.h>
  35#include <asm/unaligned.h>
  36#include <scsi/scsi.h>
  37#include <scsi/scsi_host.h>
  38#include <scsi/scsi_tcq.h>
  39#include <target/target_core_base.h>
  40#include <target/target_core_fabric.h>
  41
  42#include "qla_def.h"
  43#include "qla_target.h"
  44
  45static int ql2xtgt_tape_enable;
  46module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
  47MODULE_PARM_DESC(ql2xtgt_tape_enable,
  48                "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
  49
  50static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
  51module_param(qlini_mode, charp, S_IRUGO);
  52MODULE_PARM_DESC(qlini_mode,
  53        "Determines when initiator mode will be enabled. Possible values: "
  54        "\"exclusive\" - initiator mode will be enabled on load, "
  55        "disabled on enabling target mode and then on disabling target mode "
  56        "enabled back; "
  57        "\"disabled\" - initiator mode will never be enabled; "
  58        "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
  59        "when ready "
  60        "\"enabled\" (default) - initiator mode will always stay enabled.");
  61
  62static int ql_dm_tgt_ex_pct = 0;
  63module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
  64MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
  65        "For Dual Mode (qlini_mode=dual), this parameter determines "
  66        "the percentage of exchanges/cmds FW will allocate resources "
  67        "for Target mode.");
  68
  69int ql2xuctrlirq = 1;
  70module_param(ql2xuctrlirq, int, 0644);
  71MODULE_PARM_DESC(ql2xuctrlirq,
  72    "User to control IRQ placement via smp_affinity."
  73    "Valid with qlini_mode=disabled."
  74    "1(default): enable");
  75
  76int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
  77
  78static int temp_sam_status = SAM_STAT_BUSY;
  79
  80/*
  81 * From scsi/fc/fc_fcp.h
  82 */
  83enum fcp_resp_rsp_codes {
  84        FCP_TMF_CMPL = 0,
  85        FCP_DATA_LEN_INVALID = 1,
  86        FCP_CMND_FIELDS_INVALID = 2,
  87        FCP_DATA_PARAM_MISMATCH = 3,
  88        FCP_TMF_REJECTED = 4,
  89        FCP_TMF_FAILED = 5,
  90        FCP_TMF_INVALID_LUN = 9,
  91};
  92
  93/*
  94 * fc_pri_ta from scsi/fc/fc_fcp.h
  95 */
  96#define FCP_PTA_SIMPLE      0   /* simple task attribute */
  97#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
  98#define FCP_PTA_ORDERED     2   /* ordered task attribute */
  99#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
 100#define FCP_PTA_MASK        7   /* mask for task attribute field */
 101#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
 102#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
 103
 104/*
 105 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 106 * must be called under HW lock and could unlock/lock it inside.
 107 * It isn't an issue, since in the current implementation on the time when
 108 * those functions are called:
 109 *
 110 *   - Either context is IRQ and only IRQ handler can modify HW data,
 111 *     including rings related fields,
 112 *
 113 *   - Or access to target mode variables from struct qla_tgt doesn't
 114 *     cross those functions boundaries, except tgt_stop, which
 115 *     additionally protected by irq_cmd_count.
 116 */
 117/* Predefs for callbacks handed to qla2xxx LLD */
 118static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
 119        struct atio_from_isp *pkt, uint8_t);
 120static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
 121        response_t *pkt);
 122static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
 123        int fn, void *iocb, int flags);
 124static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
 125        *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
 126static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
 127        struct atio_from_isp *atio, uint16_t status, int qfull);
 128static void qlt_disable_vha(struct scsi_qla_host *vha);
 129static void qlt_clear_tgt_db(struct qla_tgt *tgt);
 130static void qlt_send_notify_ack(struct qla_qpair *qpair,
 131        struct imm_ntfy_from_isp *ntfy,
 132        uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
 133        uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
 134static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 135        struct imm_ntfy_from_isp *imm, int ha_locked);
 136static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
 137        fc_port_t *fcport, bool local);
 138void qlt_unreg_sess(struct fc_port *sess);
 139static void qlt_24xx_handle_abts(struct scsi_qla_host *,
 140        struct abts_recv_from_24xx *);
 141static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
 142    uint16_t);
 143
 144/*
 145 * Global Variables
 146 */
 147static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
 148struct kmem_cache *qla_tgt_plogi_cachep;
 149static mempool_t *qla_tgt_mgmt_cmd_mempool;
 150static struct workqueue_struct *qla_tgt_wq;
 151static DEFINE_MUTEX(qla_tgt_mutex);
 152static LIST_HEAD(qla_tgt_glist);
 153
 154static const char *prot_op_str(u32 prot_op)
 155{
 156        switch (prot_op) {
 157        case TARGET_PROT_NORMAL:        return "NORMAL";
 158        case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
 159        case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
 160        case TARGET_PROT_DIN_STRIP:     return "DIN_STRIP";
 161        case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
 162        case TARGET_PROT_DIN_PASS:      return "DIN_PASS";
 163        case TARGET_PROT_DOUT_PASS:     return "DOUT_PASS";
 164        default:                        return "UNKNOWN";
 165        }
 166}
 167
 168/* This API intentionally takes dest as a parameter, rather than returning
 169 * int value to avoid caller forgetting to issue wmb() after the store */
 170void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
 171{
 172        scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
 173        *dest = atomic_inc_return(&base_vha->generation_tick);
 174        /* memory barrier */
 175        wmb();
 176}
 177
 178/* Might release hw lock, then reaquire!! */
 179static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
 180{
 181        /* Send marker if required */
 182        if (unlikely(vha->marker_needed != 0)) {
 183                int rc = qla2x00_issue_marker(vha, vha_locked);
 184                if (rc != QLA_SUCCESS) {
 185                        ql_dbg(ql_dbg_tgt, vha, 0xe03d,
 186                            "qla_target(%d): issue_marker() failed\n",
 187                            vha->vp_idx);
 188                }
 189                return rc;
 190        }
 191        return QLA_SUCCESS;
 192}
 193
 194static inline
 195struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
 196        uint8_t *d_id)
 197{
 198        struct scsi_qla_host *host;
 199        uint32_t key = 0;
 200
 201        if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
 202            (vha->d_id.b.al_pa == d_id[2]))
 203                return vha;
 204
 205        key  = (uint32_t)d_id[0] << 16;
 206        key |= (uint32_t)d_id[1] <<  8;
 207        key |= (uint32_t)d_id[2];
 208
 209        host = btree_lookup32(&vha->hw->tgt.host_map, key);
 210        if (!host)
 211                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
 212                    "Unable to find host %06x\n", key);
 213
 214        return host;
 215}
 216
 217static inline
 218struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
 219        uint16_t vp_idx)
 220{
 221        struct qla_hw_data *ha = vha->hw;
 222
 223        if (vha->vp_idx == vp_idx)
 224                return vha;
 225
 226        BUG_ON(ha->tgt.tgt_vp_map == NULL);
 227        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
 228                return ha->tgt.tgt_vp_map[vp_idx].vha;
 229
 230        return NULL;
 231}
 232
 233static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
 234{
 235        unsigned long flags;
 236
 237        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
 238
 239        vha->hw->tgt.num_pend_cmds++;
 240        if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
 241                vha->qla_stats.stat_max_pend_cmds =
 242                        vha->hw->tgt.num_pend_cmds;
 243        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
 244}
 245static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
 246{
 247        unsigned long flags;
 248
 249        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
 250        vha->hw->tgt.num_pend_cmds--;
 251        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
 252}
 253
 254
 255static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
 256        struct atio_from_isp *atio, uint8_t ha_locked)
 257{
 258        struct qla_tgt_sess_op *u;
 259        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 260        unsigned long flags;
 261
 262        if (tgt->tgt_stop) {
 263                ql_dbg(ql_dbg_async, vha, 0x502c,
 264                    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
 265                    vha->vp_idx);
 266                goto out_term;
 267        }
 268
 269        u = kzalloc(sizeof(*u), GFP_ATOMIC);
 270        if (u == NULL)
 271                goto out_term;
 272
 273        u->vha = vha;
 274        memcpy(&u->atio, atio, sizeof(*atio));
 275        INIT_LIST_HEAD(&u->cmd_list);
 276
 277        spin_lock_irqsave(&vha->cmd_list_lock, flags);
 278        list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
 279        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
 280
 281        schedule_delayed_work(&vha->unknown_atio_work, 1);
 282
 283out:
 284        return;
 285
 286out_term:
 287        qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
 288        goto out;
 289}
 290
/*
 * Drain vha->unknown_atio_list: each parked ATIO is either requeued to
 * the host that now matches its d_id, terminated (when aborted or when
 * the target is stopping), or left queued with this work rescheduled
 * for another attempt.
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;	/* delayed work armed at most once per pass */

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			/* skip routing attempts; unlink and free below */
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			/* Destination still unknown: keep entry, retry later */
			ql_dbg(ql_dbg_async, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		/* Entry was handled (requeued or terminated): unlink, free */
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}
 339
 340void qlt_unknown_atio_work_fn(struct work_struct *work)
 341{
 342        struct scsi_qla_host *vha = container_of(to_delayed_work(work),
 343            struct scsi_qla_host, unknown_atio_work);
 344
 345        qlt_try_to_dequeue_unknown_atios(vha, 0);
 346}
 347
/*
 * Route an entry from the ATIO queue to the scsi_qla_host (base port or
 * NPIV vport) it is addressed to, then dispatch it. @ha_locked tells
 * whether the caller already holds the hardware lock. Always returns
 * false.
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* New SCSI command: route by destination port id (d_id) */
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);


			/* Park it; the owning vport may still appear */
			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		/* Give previously parked ATIOs another routing attempt */
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		/* Route by vp_index unless both index and handle are wild */
		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		/* ABTS handling requires the hardware lock */
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}
 444
 445void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 446        struct rsp_que *rsp, response_t *pkt)
 447{
 448        switch (pkt->entry_type) {
 449        case CTIO_CRC2:
 450                ql_dbg(ql_dbg_tgt, vha, 0xe073,
 451                        "qla_target(%d):%s: CRC2 Response pkt\n",
 452                        vha->vp_idx, __func__);
 453        case CTIO_TYPE7:
 454        {
 455                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
 456                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
 457                    entry->vp_index);
 458                if (unlikely(!host)) {
 459                        ql_dbg(ql_dbg_tgt, vha, 0xe041,
 460                            "qla_target(%d): Response pkt (CTIO_TYPE7) "
 461                            "received, with unknown vp_index %d\n",
 462                            vha->vp_idx, entry->vp_index);
 463                        break;
 464                }
 465                qlt_response_pkt(host, rsp, pkt);
 466                break;
 467        }
 468
 469        case IMMED_NOTIFY_TYPE:
 470        {
 471                struct scsi_qla_host *host = vha;
 472                struct imm_ntfy_from_isp *entry =
 473                    (struct imm_ntfy_from_isp *)pkt;
 474
 475                host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
 476                if (unlikely(!host)) {
 477                        ql_dbg(ql_dbg_tgt, vha, 0xe042,
 478                            "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
 479                            "received, with unknown vp_index %d\n",
 480                            vha->vp_idx, entry->u.isp24.vp_index);
 481                        break;
 482                }
 483                qlt_response_pkt(host, rsp, pkt);
 484                break;
 485        }
 486
 487        case NOTIFY_ACK_TYPE:
 488        {
 489                struct scsi_qla_host *host = vha;
 490                struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
 491
 492                if (0xFF != entry->u.isp24.vp_index) {
 493                        host = qlt_find_host_by_vp_idx(vha,
 494                            entry->u.isp24.vp_index);
 495                        if (unlikely(!host)) {
 496                                ql_dbg(ql_dbg_tgt, vha, 0xe043,
 497                                    "qla_target(%d): Response "
 498                                    "pkt (NOTIFY_ACK_TYPE) "
 499                                    "received, with unknown "
 500                                    "vp_index %d\n", vha->vp_idx,
 501                                    entry->u.isp24.vp_index);
 502                                break;
 503                        }
 504                }
 505                qlt_response_pkt(host, rsp, pkt);
 506                break;
 507        }
 508
 509        case ABTS_RECV_24XX:
 510        {
 511                struct abts_recv_from_24xx *entry =
 512                    (struct abts_recv_from_24xx *)pkt;
 513                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
 514                    entry->vp_index);
 515                if (unlikely(!host)) {
 516                        ql_dbg(ql_dbg_tgt, vha, 0xe044,
 517                            "qla_target(%d): Response pkt "
 518                            "(ABTS_RECV_24XX) received, with unknown "
 519                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
 520                        break;
 521                }
 522                qlt_response_pkt(host, rsp, pkt);
 523                break;
 524        }
 525
 526        case ABTS_RESP_24XX:
 527        {
 528                struct abts_resp_to_24xx *entry =
 529                    (struct abts_resp_to_24xx *)pkt;
 530                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
 531                    entry->vp_index);
 532                if (unlikely(!host)) {
 533                        ql_dbg(ql_dbg_tgt, vha, 0xe045,
 534                            "qla_target(%d): Response pkt "
 535                            "(ABTS_RECV_24XX) received, with unknown "
 536                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
 537                        break;
 538                }
 539                qlt_response_pkt(host, rsp, pkt);
 540                break;
 541        }
 542
 543        default:
 544                qlt_response_pkt(vha, rsp, pkt);
 545                break;
 546        }
 547
 548}
 549
 550/*
 551 * All qlt_plogi_ack_t operations are protected by hardware_lock
 552 */
 553static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
 554        struct imm_ntfy_from_isp *ntfy, int type)
 555{
 556        struct qla_work_evt *e;
 557        e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
 558        if (!e)
 559                return QLA_FUNCTION_FAILED;
 560
 561        e->u.nack.fcport = fcport;
 562        e->u.nack.type = type;
 563        memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
 564        return qla2x00_post_work(vha, e);
 565}
 566
/*
 * Completion callback for the async NACK srb started by
 * qla24xx_async_notify_ack(). Under tgt.sess_lock, advances the
 * fcport's login state according to the NACK type, then frees the srb.
 */
static
void qla2x00_async_nack_sp_done(void *s, int res)
{
	struct srb *sp = (struct srb *)s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC  type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		/* PLOGI acked: mark login complete at the FW level */
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			/* First successful login for this port */
			sp->fcport->login_succ = 1;

			vha->fcport_count++;

			if (!IS_IIDMA_CAPABLE(vha->hw) ||
			    !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20f3,
				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_upd_fcport_work(vha, sp->fcport);
			} else {
				/* Query port speed (GPSC) before the update */
				ql_dbg(ql_dbg_disc, vha, 0x20f5,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__,
				    sp->fcport->port_name,
				    vha->fcport_count);

				qla24xx_post_gpsc_work(vha, sp->fcport);
			}
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
 633
/*
 * Send a NOTIFY_ACK for @ntfy asynchronously via an srb. @type
 * (SRB_NACK_PLOGI/PRLI/LOGO) selects the pending fw_login_state set on
 * @fcport before the IOCB is started; completion is handled by
 * qla2x00_async_nack_sp_done(). On failure, FCF_ASYNC_SENT is cleared
 * and the error is returned.
 */
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;	/* NACK type name, for the debug log only */

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	/* two seconds of slack over the usual async timeout */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	sp->done = qla2x00_async_nack_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
 687
 688void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 689{
 690        fc_port_t *t;
 691        unsigned long flags;
 692
 693        switch (e->u.nack.type) {
 694        case SRB_NACK_PRLI:
 695                mutex_lock(&vha->vha_tgt.tgt_mutex);
 696                t = qlt_create_sess(vha, e->u.nack.fcport, 0);
 697                mutex_unlock(&vha->vha_tgt.tgt_mutex);
 698                if (t) {
 699                        ql_log(ql_log_info, vha, 0xd034,
 700                            "%s create sess success %p", __func__, t);
 701                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 702                        /* create sess has an extra kref */
 703                        vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
 704                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 705                }
 706                break;
 707        }
 708        qla24xx_async_notify_ack(vha, e->u.nack.fcport,
 709            (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
 710}
 711
 712void qla24xx_delete_sess_fn(struct work_struct *work)
 713{
 714        fc_port_t *fcport = container_of(work, struct fc_port, del_work);
 715        struct qla_hw_data *ha = fcport->vha->hw;
 716        unsigned long flags;
 717
 718        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 719
 720        if (fcport->se_sess) {
 721                ha->tgt.tgt_ops->shutdown_sess(fcport);
 722                ha->tgt.tgt_ops->put_sess(fcport);
 723        } else {
 724                qlt_unreg_sess(fcport);
 725        }
 726        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 727}
 728
 729/*
 730 * Called from qla2x00_reg_remote_port()
 731 */
 732void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 733{
 734        struct qla_hw_data *ha = vha->hw;
 735        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 736        struct fc_port *sess = fcport;
 737        unsigned long flags;
 738
 739        if (!vha->hw->tgt.tgt_ops)
 740                return;
 741
 742        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 743        if (tgt->tgt_stop) {
 744                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 745                return;
 746        }
 747
 748        if (fcport->disc_state == DSC_DELETE_PEND) {
 749                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 750                return;
 751        }
 752
 753        if (!sess->se_sess) {
 754                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 755
 756                mutex_lock(&vha->vha_tgt.tgt_mutex);
 757                sess = qlt_create_sess(vha, fcport, false);
 758                mutex_unlock(&vha->vha_tgt.tgt_mutex);
 759
 760                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 761        } else {
 762                if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
 763                        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 764                        return;
 765                }
 766
 767                if (!kref_get_unless_zero(&sess->sess_kref)) {
 768                        ql_dbg(ql_dbg_disc, vha, 0x2107,
 769                            "%s: kref_get fail sess %8phC \n",
 770                            __func__, sess->port_name);
 771                        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 772                        return;
 773                }
 774
 775                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
 776                    "qla_target(%u): %ssession for port %8phC "
 777                    "(loop ID %d) reappeared\n", vha->vp_idx,
 778                    sess->local ? "local " : "", sess->port_name, sess->loop_id);
 779
 780                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
 781                    "Reappeared sess %p\n", sess);
 782
 783                ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
 784                    fcport->loop_id,
 785                    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
 786        }
 787
 788        if (sess && sess->local) {
 789                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
 790                    "qla_target(%u): local session for "
 791                    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
 792                    fcport->port_name, sess->loop_id);
 793                sess->local = 0;
 794        }
 795        ha->tgt.tgt_ops->put_sess(sess);
 796        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 797}
 798
 799/*
 800 * This is a zero-base ref-counting solution, since hardware_lock
 801 * guarantees that ref_count is not modified concurrently.
 802 * Upon successful return content of iocb is undefined
 803 */
 804static struct qlt_plogi_ack_t *
 805qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
 806                       struct imm_ntfy_from_isp *iocb)
 807{
 808        struct qlt_plogi_ack_t *pla;
 809
 810        list_for_each_entry(pla, &vha->plogi_ack_list, list) {
 811                if (pla->id.b24 == id->b24) {
 812                        qlt_send_term_imm_notif(vha, &pla->iocb, 1);
 813                        memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
 814                        return pla;
 815                }
 816        }
 817
 818        pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
 819        if (!pla) {
 820                ql_dbg(ql_dbg_async, vha, 0x5088,
 821                       "qla_target(%d): Allocation of plogi_ack failed\n",
 822                       vha->vp_idx);
 823                return NULL;
 824        }
 825
 826        memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
 827        pla->id = *id;
 828        list_add_tail(&pla->list, &vha->plogi_ack_list);
 829
 830        return pla;
 831}
 832
/*
 * Drop one reference on @pla.  When the last reference is dropped:
 * post the deferred PLOGI ACK (SRB_NACK_PLOGI work) for the owning
 * fcport, clear every fcport plogi_link slot still pointing at @pla,
 * unlink it from vha->plogi_ack_list and free it.
 *
 * NOTE(review): pla->fcport is dereferenced unconditionally below;
 * presumably a QLT_PLOGI_LINK_SAME_WWN link (which sets pla->fcport in
 * qlt_plogi_ack_link()) always exists before the final unref — confirm
 * against callers.
 */
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
        struct imm_ntfy_from_isp *iocb = &pla->iocb;
        port_id_t port_id;
        uint16_t loop_id;
        fc_port_t *fcport = pla->fcport;

        BUG_ON(!pla->ref_count);
        pla->ref_count--;

        /* Other users remain; ACK is still deferred. */
        if (pla->ref_count)
                return;

        ql_dbg(ql_dbg_disc, vha, 0x5089,
            "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
            " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
            iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
            iocb->u.isp24.port_id[0],
            le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.exchange_address, iocb->ox_id);

        /* iocb port_id byte layout: [2]=domain, [1]=area, [0]=al_pa */
        port_id.b.domain = iocb->u.isp24.port_id[2];
        port_id.b.area   = iocb->u.isp24.port_id[1];
        port_id.b.al_pa  = iocb->u.isp24.port_id[0];
        port_id.b.rsvd_1 = 0;

        loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

        fcport->loop_id = loop_id;
        fcport->d_id = port_id;
        qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);

        /* Make sure no fcport keeps a dangling pointer to the dying pla. */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
                        fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
                if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
                        fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
        }

        list_del(&pla->list);
        kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
 876
/*
 * Link @pla into @sess->plogi_link[@link], taking a new reference on
 * @pla.  Any pla previously linked at the same slot loses a reference
 * first (and may be freed by qlt_plogi_ack_unref()).  For the
 * QLT_PLOGI_LINK_SAME_WWN slot, @sess also becomes the owning fcport
 * that qlt_plogi_ack_unref() will eventually ACK on.
 */
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
        struct imm_ntfy_from_isp *iocb = &pla->iocb;
        /* Inc ref_count first because link might already be pointing at pla */
        pla->ref_count++;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
                "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
                " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
                sess, link, sess->port_name,
                iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
                iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
                pla->ref_count, pla, link);

        /* Displace whatever was linked at this slot before. */
        if (sess->plogi_link[link])
                qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

        if (link == QLT_PLOGI_LINK_SAME_WWN)
                pla->fcport = sess;

        sess->plogi_link[link] = pla;
}
 901
/* Descriptor for one explicit LOGO in flight; see qlt_send_first_logo(). */
typedef struct {
        /* These fields must be initialized by the caller */
        port_id_t id;
        /*
         * Number of commands dropped while we were waiting for the
         * initiator to ack the LOGO.  Initialize to 1 if the LOGO is
         * triggered by a command, otherwise to 0.
         */
        int cmd_count;

        /* These fields are used by callee */
        struct list_head list;
} qlt_port_logo_t;
 915
/*
 * Issue an explicit ELS LOGO to the port identified by @logo->id, unless
 * a LOGO to the same port is already in flight — in that case just fold
 * our dropped-command count into the pending entry and return.
 *
 * @logo typically lives on the caller's stack (see qlt_free_session_done());
 * it only sits on vha->logo_list for the duration of the synchronous
 * qla24xx_els_dcmd_iocb() call and is unlinked before returning.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
        qlt_port_logo_t *tmp;
        int res;

        mutex_lock(&vha->vha_tgt.tgt_mutex);

        /* A LOGO for this port already pending?  Merge and bail. */
        list_for_each_entry(tmp, &vha->logo_list, list) {
                if (tmp->id.b24 == logo->id.b24) {
                        tmp->cmd_count += logo->cmd_count;
                        mutex_unlock(&vha->vha_tgt.tgt_mutex);
                        return;
                }
        }

        list_add_tail(&logo->list, &vha->logo_list);

        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

        /* LOGO done (or failed); remove before @logo goes out of scope. */
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        list_del(&logo->list);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
            "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
            logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
            logo->cmd_count, res);
}
 947
/*
 * Workqueue handler performing the deferred teardown of a target session
 * (scheduled via sess->free_work from qlt_unreg_sess()).
 *
 * Sequence:
 *  1. Send an explicit ELS LOGO and/or schedule a firmware logout if the
 *     session flags ask for it (skipped for SW-reserved addresses).
 *  2. Release the fabric-module se_session.
 *  3. If a firmware logout was scheduled, busy-wait (100 ms steps) for
 *     its completion.
 *  4. Send a deferred LOGO NACK if one is owed.
 *  5. Under sess_lock: drop the tgt session count, mark the session
 *     DELETED/PORT_UNAVAIL, drop fcport_count, release any linked
 *     PLOGI ACK entries (own and conflict).
 *  6. Wake waiters on tgt->waitQ / vha->fcport_waitQ when counts hit 0,
 *     then post FCME_DELETE_DONE unless the target is stopping or the
 *     driver is being removed.
 */
static void qlt_free_session_done(struct work_struct *work)
{
        struct fc_port *sess = container_of(work, struct fc_port,
            free_work);
        struct qla_tgt *tgt = sess->tgt;
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        bool logout_started = false;
        struct event_arg ea;
        scsi_qla_host_t *base_vha;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
                "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
                " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
                __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
                sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
                sess->logout_on_delete, sess->keep_nport_handle,
                sess->send_els_logo);

        if (!IS_SW_RESV_ADDR(sess->d_id)) {
                if (sess->send_els_logo) {
                        qlt_port_logo_t logo;

                        /* Stack object; qlt_send_first_logo() unlinks it
                         * from vha->logo_list before returning. */
                        logo.id = sess->d_id;
                        logo.cmd_count = 0;
                        qlt_send_first_logo(vha, &logo);
                }

                if (sess->logout_on_delete) {
                        int rc;

                        rc = qla2x00_post_async_logout_work(vha, sess, NULL);
                        if (rc != QLA_SUCCESS)
                                ql_log(ql_log_warn, vha, 0xf085,
                                    "Schedule logo failed sess %p rc %d\n",
                                    sess, rc);
                        else
                                logout_started = true;
                }
        }

        /*
         * Release the target session for FC Nexus from fabric module code.
         */
        if (sess->se_sess != NULL)
                ha->tgt.tgt_ops->free_session(sess);

        if (logout_started) {
                bool traced = false;

                /*
                 * Poll for the async logout to finish.  NOTE(review):
                 * ACCESS_ONCE() is the legacy spelling of READ_ONCE().
                 */
                while (!ACCESS_ONCE(sess->logout_completed)) {
                        if (!traced) {
                                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
                                        "%s: waiting for sess %p logout\n",
                                        __func__, sess);
                                traced = true;
                        }
                        msleep(100);
                }

                ql_dbg(ql_dbg_disc, vha, 0xf087,
                    "%s: sess %p logout completed\n",__func__, sess);
        }

        /* A LOGO from the initiator still needs its NACK. */
        if (sess->logo_ack_needed) {
                sess->logo_ack_needed = 0;
                qla24xx_async_notify_ack(vha, sess,
                        (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
        }

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->se_sess) {
                sess->se_sess = NULL;
                if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
                        tgt->sess_count--;
        }

        sess->disc_state = DSC_DELETED;
        sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
        sess->deleted = QLA_SESS_DELETED;
        sess->login_retry = vha->hw->login_retry_count;

        if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
                vha->fcport_count--;
                sess->login_succ = 0;
        }

        /* Nport handle is stale after a chip reset; drop it. */
        if (sess->chip_reset != ha->base_qpair->chip_reset)
                qla2x00_clear_loop_id(sess);

        /* Unblock a login that was paused waiting on this session. */
        if (sess->conflict) {
                sess->conflict->login_pause = 0;
                sess->conflict = NULL;
                if (!test_bit(UNLOADING, &vha->dpc_flags))
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        }

        /* Release any PLOGI ACK entries still linked to this session. */
        {
                struct qlt_plogi_ack_t *own =
                    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
                struct qlt_plogi_ack_t *con =
                    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
                struct imm_ntfy_from_isp *iocb;

                if (con) {
                        iocb = &con->iocb;
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
                                 "se_sess %p / sess %p port %8phC is gone,"
                                 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
                                 sess->se_sess, sess, sess->port_name,
                                 own ? "releasing own PLOGI" : "no own PLOGI pending",
                                 own ? own->ref_count : -1,
                                 iocb->u.isp24.port_name, con->ref_count);
                        qlt_plogi_ack_unref(vha, con);
                        sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
                } else {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
                            "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
                            sess->se_sess, sess, sess->port_name,
                            own ? "releasing own PLOGI" :
                            "no own PLOGI pending",
                            own ? own->ref_count : -1);
                }

                if (own) {
                        sess->fw_login_state = DSC_LS_PLOGI_PEND;
                        qlt_plogi_ack_unref(vha, own);
                        sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
                }
        }
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
                sess, sess->port_name, vha->fcport_count);

        if (tgt && (tgt->sess_count == 0))
                wake_up_all(&tgt->waitQ);

        if (vha->fcport_count == 0)
                wake_up_all(&vha->fcport_waitQ);

        /* Skip further discovery work if the driver is going away. */
        base_vha = pci_get_drvdata(ha->pdev);
        if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
                return;

        if (!tgt || !tgt->tgt_stop) {
                memset(&ea, 0, sizeof(ea));
                ea.event = FCME_DELETE_DONE;
                ea.fcport = sess;
                qla2x00_fcport_event_handler(vha, &ea);
        }
}
1102
/* ha->tgt.sess_lock supposed to be held on entry */
/*
 * Begin unregistration of a target session: detach it from the fabric
 * NodeACL map, mark the device lost and flag the session as
 * DELETION_IN_PROGRESS / DELETE_PEND, then hand the actual teardown to
 * qlt_free_session_done() via sess->free_work.
 */
void qlt_unreg_sess(struct fc_port *sess)
{
        struct scsi_qla_host *vha = sess->vha;

        ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
            "%s sess %p for deletion %8phC\n",
            __func__, sess, sess->port_name);

        if (sess->se_sess)
                vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

        qla2x00_mark_device_lost(vha, sess, 1, 1);

        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
        sess->disc_state = DSC_DELETE_PEND;
        /* Snapshot generations so stale RSCN/login events are ignored. */
        sess->last_rscn_gen = sess->rscn_gen;
        sess->last_login_gen = sess->login_gen;

        if (sess->nvme_flag & NVME_FLAG_REGISTERED)
                schedule_work(&sess->nvme_del_work);

        INIT_WORK(&sess->free_work, qlt_free_session_done);
        schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
1129
/*
 * Handle a reset request carried by immediate notify @iocb.
 *
 * An nport_handle of 0xFFFF is a global event: every session in the
 * target database is scheduled for deletion and, since no single
 * session can be resolved, -ESRCH is returned.  Otherwise the session
 * is looked up by loop ID and a task management function @mcmd is
 * issued against it.
 *
 * Returns 0/-errno per qlt_issue_task_mgmt(), or -ESRCH when no
 * session matches.
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
        struct qla_hw_data *ha = vha->hw;
        struct fc_port *sess = NULL;
        uint16_t loop_id;
        int res = 0;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
        unsigned long flags;

        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
                /* Global event */
                atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        } else {
                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe000,
            "Using sess for qla_tgt_reset: %p\n", sess);
        if (!sess) {
                res = -ESRCH;
                return res;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe047,
            "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);

        return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
1166
1167static void qla24xx_chk_fcp_state(struct fc_port *sess)
1168{
1169        if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1170                sess->logout_on_delete = 0;
1171                sess->logo_ack_needed = 0;
1172                sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1173                sess->scan_state = 0;
1174        }
1175}
1176
/* ha->tgt.sess_lock supposed to be held on entry */
/*
 * Mark @sess DSC_DELETE_PEND / QLA_SESS_DELETION_IN_PROGRESS and queue
 * its del_work.  Idempotent: a session already pending deletion is left
 * alone, and an already-DELETED session only gets the wait queues
 * re-woken unless a PLOGI ACK is still linked to it (in which case
 * deletion is re-run to release it).
 *
 * NOTE(review): the @immediate parameter is not referenced in the body.
 */
void qlt_schedule_sess_for_deletion(struct fc_port *sess,
        bool immediate)
{
        struct qla_tgt *tgt = sess->tgt;

        if (sess->disc_state == DSC_DELETE_PEND)
                return;

        if (sess->disc_state == DSC_DELETED) {
                /* Already torn down: just rewake any count waiters. */
                if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
                        wake_up_all(&tgt->waitQ);
                if (sess->vha->fcport_count == 0)
                        wake_up_all(&sess->vha->fcport_waitQ);

                /* Only proceed if a linked PLOGI ACK still needs release. */
                if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
                        !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
                        return;
        }

        sess->disc_state = DSC_DELETE_PEND;

        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;

        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
        qla24xx_chk_fcp_state(sess);

        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);

        schedule_work(&sess->del_work);
}
1210
/*
 * Locked wrapper around qlt_schedule_sess_for_deletion() for callers
 * that do not already hold ha->tgt.sess_lock.
 */
void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
{
        unsigned long flags;
        struct qla_hw_data *ha = sess->vha->hw;
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        qlt_schedule_sess_for_deletion(sess, 1);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
1219
/* ha->tgt.sess_lock supposed to be held on entry */
/*
 * Schedule every fcport on @tgt's vha that still has an se_session for
 * deletion.  The actual teardown happens asynchronously via del_work.
 */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
        struct fc_port *sess;
        scsi_qla_host_t *vha = tgt->vha;

        list_for_each_entry(sess, &vha->vp_fcports, list) {
                if (sess->se_sess)
                        qlt_schedule_sess_for_deletion(sess, 1);
        }

        /* At this point tgt could be already dead */
}
1233
/*
 * Resolve the firmware loop ID for the port with FC address @s_id
 * (byte order: s_id[0]=domain, s_id[1]=area, s_id[2]=al_pa, as shown by
 * the comparison below) by fetching the firmware's logged-in-port list.
 *
 * On success stores the loop ID in *@loop_id and returns 0.  Returns
 * -ENOMEM if the DMA buffer cannot be allocated, -EBUSY if the mailbox
 * query fails, or -ENOENT if the port is not in the list.
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        uint16_t *loop_id)
{
        struct qla_hw_data *ha = vha->hw;
        dma_addr_t gid_list_dma;
        struct gid_list_info *gid_list;
        char *id_iter;
        int res, rc, i;
        uint16_t entries;

        gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            &gid_list_dma, GFP_KERNEL);
        if (!gid_list) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
                    "qla_target(%d): DMA Alloc failed of %u\n",
                    vha->vp_idx, qla2x00_gid_list_size(ha));
                return -ENOMEM;
        }

        /* Get list of logged in devices */
        rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
                    "qla_target(%d): get_id_list() failed: %x\n",
                    vha->vp_idx, rc);
                res = -EBUSY;
                goto out_free_id_list;
        }

        /* Walk the firmware's variable-stride entry array. */
        id_iter = (char *)gid_list;
        res = -ENOENT;
        for (i = 0; i < entries; i++) {
                struct gid_list_info *gid = (struct gid_list_info *)id_iter;
                if ((gid->al_pa == s_id[2]) &&
                    (gid->area == s_id[1]) &&
                    (gid->domain == s_id[0])) {
                        *loop_id = le16_to_cpu(gid->loop_id);
                        res = 0;
                        break;
                }
                id_iter += ha->gid_list_info_size;
        }

out_free_id_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            gid_list, gid_list_dma);
        return res;
}
1282
1283/*
1284 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
1285 * Caller must put it.
1286 */
1287static struct fc_port *qlt_create_sess(
1288        struct scsi_qla_host *vha,
1289        fc_port_t *fcport,
1290        bool local)
1291{
1292        struct qla_hw_data *ha = vha->hw;
1293        struct fc_port *sess = fcport;
1294        unsigned long flags;
1295
1296        if (vha->vha_tgt.qla_tgt->tgt_stop)
1297                return NULL;
1298
1299        if (fcport->se_sess) {
1300                if (!kref_get_unless_zero(&sess->sess_kref)) {
1301                        ql_dbg(ql_dbg_disc, vha, 0x20f6,
1302                            "%s: kref_get_unless_zero failed for %8phC\n",
1303                            __func__, sess->port_name);
1304                        return NULL;
1305                }
1306                return fcport;
1307        }
1308        sess->tgt = vha->vha_tgt.qla_tgt;
1309        sess->local = local;
1310
1311        /*
1312         * Under normal circumstances we want to logout from firmware when
1313         * session eventually ends and release corresponding nport handle.
1314         * In the exception cases (e.g. when new PLOGI is waiting) corresponding
1315         * code will adjust these flags as necessary.
1316         */
1317        sess->logout_on_delete = 1;
1318        sess->keep_nport_handle = 0;
1319        sess->logout_completed = 0;
1320
1321        if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1322            &fcport->port_name[0], sess) < 0) {
1323                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1324                    "(%d) %8phC check_initiator_node_acl failed\n",
1325                    vha->vp_idx, fcport->port_name);
1326                return NULL;
1327        } else {
1328                kref_init(&fcport->sess_kref);
1329                /*
1330                 * Take an extra reference to ->sess_kref here to handle
1331                 * fc_port access across ->tgt.sess_lock reaquire.
1332                 */
1333                if (!kref_get_unless_zero(&sess->sess_kref)) {
1334                        ql_dbg(ql_dbg_disc, vha, 0x20f7,
1335                            "%s: kref_get_unless_zero failed for %8phC\n",
1336                            __func__, sess->port_name);
1337                        return NULL;
1338                }
1339
1340                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1341                if (!IS_SW_RESV_ADDR(sess->d_id))
1342                        vha->vha_tgt.qla_tgt->sess_count++;
1343
1344                qlt_do_generation_tick(vha, &sess->generation);
1345                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1346        }
1347
1348        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1349            "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
1350            sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1351            vha->vha_tgt.qla_tgt->sess_count);
1352
1353        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1354            "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
1355            "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
1356            vha->vp_idx, local ?  "local " : "", fcport->port_name,
1357            fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
1358            sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
1359
1360        return sess;
1361}
1362
1363/*
1364 * max_gen - specifies maximum session generation
1365 * at which this deletion requestion is still valid
1366 */
1367void
1368qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1369{
1370        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1371        struct fc_port *sess = fcport;
1372        unsigned long flags;
1373
1374        if (!vha->hw->tgt.tgt_ops)
1375                return;
1376
1377        if (!tgt)
1378                return;
1379
1380        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1381        if (tgt->tgt_stop) {
1382                spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1383                return;
1384        }
1385        if (!sess->se_sess) {
1386                spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1387                return;
1388        }
1389
1390        if (max_gen - sess->generation < 0) {
1391                spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1392                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1393                    "Ignoring stale deletion request for se_sess %p / sess %p"
1394                    " for port %8phC, req_gen %d, sess_gen %d\n",
1395                    sess->se_sess, sess, sess->port_name, max_gen,
1396                    sess->generation);
1397                return;
1398        }
1399
1400        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1401
1402        sess->local = 1;
1403        qlt_schedule_sess_for_deletion(sess, false);
1404        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1405}
1406
/*
 * Return nonzero when @tgt has no remaining sessions.  Used as the
 * wait_event_timeout() condition in qlt_stop_phase1(); reads
 * tgt->sess_count under sess_lock.
 */
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;
        int res;
        /*
         * We need to protect against race, when tgt is freed before or
         * inside wake_up()
         */
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
            "tgt %p, sess_count=%d\n",
            tgt, tgt->sess_count);
        res = (tgt->sess_count == 0);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

        return res;
}
1425
/* Called by tcm_qla2xxx configfs code */
/*
 * Phase 1 of target shutdown: refuse if NPIV vports are in use or a
 * stop is already in progress, otherwise set tgt_stop, schedule all
 * sessions for deletion, flush pending session works, wait (bounded)
 * for the session count to drain, disable target mode on the HBA and
 * wait once more.
 *
 * Returns 0 on success, -EPERM when stopping is not permitted.
 */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;

        mutex_lock(&qla_tgt_mutex);
        if (!vha->fc_vport) {
                struct Scsi_Host *sh = vha->host;
                struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
                bool npiv_vports;

                spin_lock_irqsave(sh->host_lock, flags);
                npiv_vports = (fc_host->npiv_vports_inuse);
                spin_unlock_irqrestore(sh->host_lock, flags);

                if (npiv_vports) {
                        mutex_unlock(&qla_tgt_mutex);
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
                            "NPIV is in use. Can not stop target\n");
                        return -EPERM;
                }
        }
        if (tgt->tgt_stop || tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                    "Already in tgt->tgt_stop or tgt_stopped state\n");
                mutex_unlock(&qla_tgt_mutex);
                return -EPERM;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
            vha->host_no, vha);
        /*
         * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
         * Lock is needed, because we still can get an incoming packet.
         */
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        mutex_unlock(&qla_tgt_mutex);

        /* Flush until the scheduled session works list drains. */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        while (!list_empty(&tgt->sess_works_list)) {
                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
                flush_scheduled_work();
                spin_lock_irqsave(&tgt->sess_work_lock, flags);
        }
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
            "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

        wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

        /* Big hammer */
        if (!ha->flags.host_shutting_down &&
            (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
                qlt_disable_vha(vha);

        /* Wait for sessions to clear out (just in case) */
        wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
        return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
1496
/* Called by tcm_qla2xxx configfs code */
/*
 * Phase 2 of target shutdown: transition tgt_stop -> tgt_stopped under
 * tgt_mutex.  Must follow a completed qlt_stop_phase1(); both
 * out-of-order cases log and dump a stack trace instead of proceeding.
 */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
        scsi_qla_host_t *vha = tgt->vha;

        if (tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
                    "Already in tgt->tgt_stopped state\n");
                dump_stack();
                return;
        }
        if (!tgt->tgt_stop) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
                    "%s: phase1 stop is not completed\n", __func__);
                dump_stack();
                return;
        }

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
            tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
1524
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
/*
 * Final teardown of @tgt: run stop phases 1/2 if not yet done, detach
 * every qpair hint under the owning qpair's lock, free the hint array,
 * unlink the tgt from the global qla_tgt list, empty and destroy the
 * LUN-to-qpair btree, notify the fabric module, clear
 * vha->vha_tgt.qla_tgt and free the structure.
 */
static void qlt_release(struct qla_tgt *tgt)
{
        scsi_qla_host_t *vha = tgt->vha;
        void *node;
        u64 key = 0;
        u16 i;
        struct qla_qpair_hint *h;
        struct qla_hw_data *ha = vha->hw;

        if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
            !tgt->tgt_stopped)
                qlt_stop_phase1(tgt);

        if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
                qlt_stop_phase2(tgt);

        /* Unhook every per-qpair hint (index 0 plus max_qpairs entries). */
        for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
                unsigned long flags;

                h = &tgt->qphints[i];
                if (h->qpair) {
                        spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
                        list_del(&h->hint_elem);
                        spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
                        h->qpair = NULL;
                }
        }
        kfree(tgt->qphints);
        mutex_lock(&qla_tgt_mutex);
        list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
        mutex_unlock(&qla_tgt_mutex);

        /* Drain the LUN -> qpair map before destroying it. */
        btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
                btree_remove64(&tgt->lun_qpair_map, key);

        btree_destroy64(&tgt->lun_qpair_map);

        if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
                ha->tgt.tgt_ops->remove_target(vha);

        vha->vha_tgt.qla_tgt = NULL;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
            "Release of tgt %p finished\n", tgt);

        kfree(tgt);
}
1573
/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue a deferred session work item of @type carrying a copy of
 * @param (GFP_ATOMIC since the hardware lock is held), then kick
 * tgt->sess_work.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): @param_size is copied into prm->tm_iocb without a bound
 * check — presumably all callers pass at most sizeof(prm->tm_iocb);
 * confirm at the call sites.
 */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
        const void *param, unsigned int param_size)
{
        struct qla_tgt_sess_work_param *prm;
        unsigned long flags;

        prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
        if (!prm) {
                ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
                    "qla_target(%d): Unable to create session "
                    "work, command will be refused", 0);
                return -ENOMEM;
        }

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
            "Scheduling work (type %d, prm %p)"
            " to find session for param %p (size %d, tgt %p)\n",
            type, prm, param, param_size, tgt);

        prm->type = type;
        memcpy(&prm->tm_iocb, param, param_size);

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        schedule_work(&tgt->sess_work);

        return 0;
}
1605
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	/*
	 * Build and ring a NOTIFY_ACK IOCB that echoes most fields of the
	 * immediate notify @ntfy back to the firmware.  Of the explicit
	 * arguments, only the SRR parameters (@srr_flags, @srr_reject_code,
	 * @srr_explan) are used in this ISP24xx path; @add_flags,
	 * @resp_code and @resp_code_valid are not referenced here.
	 */
	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		/* No ring space; the notify simply goes unacknowledged. */
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/* Account for the ack so completion processing can match it up. */
	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	/* For ELS notifies, carry forward only the PUREX IOCB flag. */
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}
1667
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	/*
	 * Build and ring an ABTS response IOCB for the received ABTS:
	 * BA_ACC when @status == FCP_TMF_CMPL, BA_RJT otherwise.
	 * @ids_reversed: the s_id/d_id in @abts are already from our
	 * point of view (e.g. it is a firmware echo of our own response),
	 * so copy them straight instead of swapping.
	 */
	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	/* f_ctl on the wire is a 3-byte field; copy it byte by byte. */
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		/* Normal case: reply goes back to the sender, so swap IDs. */
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		/* Accept: BA_ACC payload covering the whole sequence. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* Reject: BA_RJT "unable to perform". */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	/* Account for the response so the completion path can match it. */
	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
1747
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	/*
	 * The firmware failed to complete our earlier ABTS response: send a
	 * CTIO7 with the TERMINATE flag to tear the exchange down, then
	 * re-issue the ABTS response.  Runs on the base qpair.
	 */
	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
	    vha->hw->base_qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * We've got on entrance firmware's response on by us generated
	 * ABTS response. So, in it ID fields are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	/* IDs are reversed in @entry, so d_id here is the initiator. */
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
					    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, vha->req);

	/* Resend the ABTS accept; IDs in @entry are already from our view. */
	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
	    (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}
1795
1796static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1797{
1798        struct qla_tgt_sess_op *op;
1799        struct qla_tgt_cmd *cmd;
1800        unsigned long flags;
1801
1802        spin_lock_irqsave(&vha->cmd_list_lock, flags);
1803        list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1804                if (tag == op->atio.u.isp24.exchange_addr) {
1805                        op->aborted = true;
1806                        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1807                        return 1;
1808                }
1809        }
1810
1811        list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1812                if (tag == op->atio.u.isp24.exchange_addr) {
1813                        op->aborted = true;
1814                        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1815                        return 1;
1816                }
1817        }
1818
1819        list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1820                if (tag == cmd->atio.u.isp24.exchange_addr) {
1821                        cmd->aborted = 1;
1822                        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1823                        return 1;
1824                }
1825        }
1826        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1827
1828        return 0;
1829}
1830
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other port (which may have cmds
 *     for the same lun)
 */
1836static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1837                                u64 lun, uint8_t *s_id)
1838{
1839        struct qla_tgt_sess_op *op;
1840        struct qla_tgt_cmd *cmd;
1841        uint32_t key;
1842        unsigned long flags;
1843
1844        key = sid_to_key(s_id);
1845        spin_lock_irqsave(&vha->cmd_list_lock, flags);
1846        list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1847                uint32_t op_key;
1848                u64 op_lun;
1849
1850                op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1851                op_lun = scsilun_to_int(
1852                        (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1853                if (op_key == key && op_lun == lun)
1854                        op->aborted = true;
1855        }
1856
1857        list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1858                uint32_t op_key;
1859                u64 op_lun;
1860
1861                op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1862                op_lun = scsilun_to_int(
1863                        (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1864                if (op_key == key && op_lun == lun)
1865                        op->aborted = true;
1866        }
1867
1868        list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1869                uint32_t cmd_key;
1870                u64 cmd_lun;
1871
1872                cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1873                cmd_lun = scsilun_to_int(
1874                        (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1875                if (cmd_key == key && cmd_lun == lun)
1876                        cmd->aborted = 1;
1877        }
1878        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1879}
1880
1881/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int rc;

	/*
	 * Handle an ABTS for a known session: if the target exchange is
	 * still queued locally, just flag it aborted and accept; otherwise
	 * hand an ABTS TMR to target-core.  Returns 0 on success, -ENOMEM
	 * or -EFAULT on failure (caller then sends a reject).
	 */
	if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
		/* send TASK_ABORT response immediately */
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
		return 0;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	/* GFP_ATOMIC: caller holds ha->hardware_lock. */
	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	/* Keep the original ABTS so the response can echo its fields. */
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	/* Snapshot chip_reset to detect a chip reset racing this TMR. */
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = ha->base_qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
1931
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;
	unsigned long flags;

	/*
	 * Entry point for a received ABTS: validate it, look up the
	 * initiator session by s_id, and dispatch to
	 * __qlt_24xx_handle_abts().  Any failure path answers with an
	 * FCP_TMF_REJECTED ABTS response.
	 */
	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		/* Only full-exchange aborts are supported, not sequence aborts. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* fcp_hdr_le stores the ID bytes reversed; restore wire order. */
	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/*
		 * No session yet: defer to the session work thread, which
		 * will retry once the session exists.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existant session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));

		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (rc != 0) {
			qlt_24xx_send_abts_resp(ha->base_qpair, abts,
			    FCP_TMF_REJECTED, false);
		}
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);


	/* Session being torn down cannot service the abort. */
	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}
2009
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	/* NOTE: despite the name, 'ha' here is the scsi_qla_host. */
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	/*
	 * Send the final status CTIO7 for a task-management command:
	 * status mode 1 with an 8-byte FCP response whose first byte is
	 * the TM response code @resp_code.
	 */
	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);


	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	/* s_id bytes are stored reversed in the ATIO header. */
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Task attributes occupy bits 9+ of the flags word. */
	temp = (atio->u.isp24.attr << 9)|
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	/* TM response code goes in the first byte of the response data. */
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
2061
/* Return a management command to qla_tgt_mgmt_cmd_mempool. */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
2067
2068/*
2069 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2070 * reacquire
2071 */
2072void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2073    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2074{
2075        struct atio_from_isp *atio = &cmd->atio;
2076        struct ctio7_to_24xx *ctio;
2077        uint16_t temp;
2078        struct scsi_qla_host *vha = cmd->vha;
2079
2080        ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2081            "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2082            "sense_key=%02x, asc=%02x, ascq=%02x",
2083            vha, atio, scsi_status, sense_key, asc, ascq);
2084
2085        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2086        if (!ctio) {
2087                ql_dbg(ql_dbg_async, vha, 0x3067,
2088                    "qla2x00t(%ld): %s failed: unable to allocate request packet",
2089                    vha->host_no, __func__);
2090                goto out;
2091        }
2092
2093        ctio->entry_type = CTIO_TYPE7;
2094        ctio->entry_count = 1;
2095        ctio->handle = QLA_TGT_SKIP_HANDLE;
2096        ctio->nport_handle = cmd->sess->loop_id;
2097        ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2098        ctio->vp_index = vha->vp_idx;
2099        ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2100        ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2101        ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2102        ctio->exchange_addr = atio->u.isp24.exchange_addr;
2103        temp = (atio->u.isp24.attr << 9) |
2104            CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2105        ctio->u.status1.flags = cpu_to_le16(temp);
2106        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2107        ctio->u.status1.ox_id = cpu_to_le16(temp);
2108        ctio->u.status1.scsi_status =
2109            cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2110        ctio->u.status1.response_len = cpu_to_le16(18);
2111        ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2112
2113        if (ctio->u.status1.residual != 0)
2114                ctio->u.status1.scsi_status |=
2115                    cpu_to_le16(SS_RESIDUAL_UNDER);
2116
2117        /* Response code and sense key */
2118        put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
2119            (&ctio->u.status1.sense_data)[0]);
2120        /* Additional sense length */
2121        put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
2122        /* ASC and ASCQ */
2123        put_unaligned_le32(((asc << 24) | (ascq << 16)),
2124            (&ctio->u.status1.sense_data)[3]);
2125
2126        /* Memory Barrier */
2127        wmb();
2128
2129        if (qpair->reqq_start_iocbs)
2130                qpair->reqq_start_iocbs(qpair);
2131        else
2132                qla2x00_start_iocbs(vha, qpair->req);
2133
2134out:
2135        return;
2136}
2137
2138/* callback from target fabric module code */
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;

	/*
	 * Transmit the response for a completed task-management command:
	 * a NOTIFY_ACK for immediate-notify based TMRs, an ABTS response
	 * for aborts, or a TM status CTIO otherwise, then release mcmd.
	 */
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		/* TMR arrived as an immediate notify; ack it. */
		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_LOGO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_PRLO ||
		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
		    ELS_TPRLO) {
			/* Session-terminating ELS: tear the session down. */
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
		} else {
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
			qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2202
2203/* No locks */
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	/*
	 * DMA-map the command's scatter-gather lists and compute the
	 * segment/descriptor counts needed to size the CTIO.  Returns 0 on
	 * success (cmd->sg_mapped set), -1 if any pci_map_sg() fails.
	 */
	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			/* HBA generates/strips PI: count one dsd per block. */
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			/* Separate protection-information scatterlist. */
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
				cmd->prot_sg, cmd->prot_sg_cnt,
				cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
2263
/*
 * Undo qlt_pci_map_calc_cnt(): unmap the data and protection
 * scatterlists and release any DIF CRC context.  No-op when the
 * command was never mapped.
 */
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	/* Remaining cleanup only applies to DIF commands with a context. */
	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
2289
2290static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2291        uint32_t req_cnt)
2292{
2293        uint32_t cnt;
2294        struct req_que *req = qpair->req;
2295
2296        if (req->cnt < (req_cnt + 2)) {
2297                cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2298                    RD_REG_DWORD_RELAXED(req->req_q_out));
2299
2300                if  (req->ring_index < cnt)
2301                        req->cnt = cnt - req->ring_index;
2302                else
2303                        req->cnt = req->length - (req->ring_index - cnt);
2304
2305                if (unlikely(req->cnt < (req_cnt + 2)))
2306                        return -EAGAIN;
2307        }
2308
2309        req->cnt -= req_cnt;
2310
2311        return 0;
2312}
2313
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
2317static inline void *qlt_get_req_pkt(struct req_que *req)
2318{
2319        /* Adjust ring index. */
2320        req->ring_index++;
2321        if (req->ring_index == req->length) {
2322                req->ring_index = 0;
2323                req->ring_ptr = req->ring;
2324        } else {
2325                req->ring_ptr++;
2326        }
2327        return (cont_entry_t *)req->ring_ptr;
2328}
2329
2330/* ha->hardware_lock supposed to be held on entry */
2331static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2332{
2333        uint32_t h;
2334        int index;
2335        uint8_t found = 0;
2336        struct req_que *req = qpair->req;
2337
2338        h = req->current_outstanding_cmd;
2339
2340        for (index = 1; index < req->num_outstanding_cmds; index++) {
2341                h++;
2342                if (h == req->num_outstanding_cmds)
2343                        h = 1;
2344
2345                if (h == QLA_TGT_SKIP_HANDLE)
2346                        continue;
2347
2348                if (!req->outstanding_cmds[h]) {
2349                        found = 1;
2350                        break;
2351                }
2352        }
2353
2354        if (found) {
2355                req->current_outstanding_cmd = h;
2356        } else {
2357                ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2358                    "qla_target(%d): Ran out of empty cmd slots\n",
2359                    qpair->vha->vp_idx);
2360                h = QLA_TGT_NULL_HANDLE;
2361        }
2362
2363        return h;
2364}
2365
2366/* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	/*
	 * Build the leading CTIO7 IOCB for prm->cmd in place on the
	 * request ring (prm->pkt), registering the command in
	 * outstanding_cmds[] under a fresh handle.  Returns 0 on success,
	 * -EAGAIN when no handle slot is free.
	 */
	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	/* Handle encodes the request queue id and is marked as a CTIO. */
	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	/* s_id bytes are stored reversed in the ATIO header. */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* Task attributes occupy bits 9+ of the flags word. */
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	return 0;
}
2410
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 *
 * Emit CONTINUE_A64 IOCBs carrying the remaining scatter-gather data
 * segments of prm->cmd that did not fit into the initial CTIO entry.
 * Consumes prm->sg / prm->seg_cnt as it goes.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	uint32_t *dword_ptr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(
			   prm->cmd->qpair->req);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
		dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			/*
			 * Each descriptor is three little-endian dwords:
			 * DMA address low, DMA address high, length.
			 */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			*dword_ptr++ = cpu_to_le32(pci_dma_hi32
			    (sg_dma_address(prm->sg)));
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
2456
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 *
 * Fill the data-segment descriptors of the CTIO7 IOCB in prm->pkt from
 * the command's DMA-mapped scatter-gather list; overflow segments are
 * chained into CONTINUE_A64 IOCBs via qlt_load_cont_data_segments().
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
	int cnt;
	uint32_t *dword_ptr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		/* 64-bit DMA address is split into low/high LE dwords. */
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));

		*dword_ptr++ = cpu_to_le32(pci_dma_hi32(
			sg_dma_address(prm->sg)));

		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	/* Remaining segments (if any) go into continuation IOCBs. */
	qlt_load_cont_data_segments(prm);
}
2502
2503static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2504{
2505        return cmd->bufflen > 0;
2506}
2507
/*
 * Log the details of a T10-DIF (protection information) failure reported
 * in the command's sense data, then dump the CDB for debugging.
 *
 * ASC 0x10 marks a DIF error; the ASCQ value distinguishes guard-tag (1),
 * application-tag (2) and reference-tag (3) mismatches.
 */
static void qlt_print_dif_err(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd;
	struct scsi_qla_host *vha;

	/* asc 0x10=dif error */
	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
		cmd = prm->cmd;
		vha = cmd->vha;
		/* ASCQ */
		switch (prm->sense_buffer[13]) {
		case 1:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 2:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		case 3:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		default:
			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
			    "se_cmd=%p tag[%x]",
			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
			    cmd->atio.u.isp24.exchange_addr);
			break;
		}
		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
	}
}
2551
/*
 * Called without ha->hardware_lock held.
 *
 * Initialize the transmit parameter block @prm for @cmd, DMA-map the data
 * buffer when data is being sent, apply residual under/overflow status,
 * and compute *full_req_cnt — the total number of request-ring entries
 * the response will occupy (one extra when a separate status IOCB must
 * follow the data IOCB).
 *
 * Returns 0 on success, -EAGAIN when PCI mapping fails.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct qla_qpair *qpair = cmd->qpair;

	/* Start from a fully-known prm state. */
	prm->cmd = cmd;
	prm->tgt = cmd->tgt;
	prm->pkt = NULL;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->residual = 0;
	prm->add_status_pkt = 0;
	prm->prot_sg = NULL;
	prm->prot_seg_cnt = 0;
	prm->tot_dsds = 0;

	/* Map the data buffer for DMA; updates prm->seg_cnt/req_cnt. */
	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	/* Propagate residual counts into the SCSI status byte flags. */
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag,
		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		       cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			/*
			 * Sense data or a non-GOOD status cannot ride in
			 * the data CTIO: reserve a second IOCB for status.
			 */
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	return 0;
}
2618
2619static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2620    int sending_sense)
2621{
2622        if (cmd->qpair->enable_class_2)
2623                return 0;
2624
2625        if (sending_sense)
2626                return cmd->conf_compl_supported;
2627        else
2628                return cmd->qpair->enable_explicit_conf &&
2629                    cmd->conf_compl_supported;
2630}
2631
/*
 * Finalize the status portion of a CTIO7 IOCB: set SEND_STATUS,
 * residual and SCSI status, and — when valid sense data is present —
 * switch the entry to status mode 1 and copy the (truncated) sense
 * buffer into it.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	/* Sense must fit in the IOCB's embedded sense_data area. */
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->cmd, 0)) {
		ctio->u.status0.flags |= cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->cmd, 1)) {
			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Status mode 1 carries sense data inside the IOCB. */
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Firmware expects sense data as big-endian 32-bit words. */
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

		qlt_print_dif_err(prm);

	} else {
		/* No sense: still use mode 1, with an empty sense area. */
		ctio->u.status1.flags &=
		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2687
2688static inline int
2689qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2690{
2691        switch (se_cmd->prot_op) {
2692        case TARGET_PROT_DOUT_INSERT:
2693        case TARGET_PROT_DIN_STRIP:
2694                if (ql2xenablehba_err_chk >= 1)
2695                        return 1;
2696                break;
2697        case TARGET_PROT_DOUT_PASS:
2698        case TARGET_PROT_DIN_PASS:
2699                if (ql2xenablehba_err_chk >= 2)
2700                        return 1;
2701                break;
2702        case TARGET_PROT_DIN_INSERT:
2703        case TARGET_PROT_DOUT_STRIP:
2704                return 1;
2705        default:
2706                break;
2707        }
2708        return 0;
2709}
2710
2711static inline int
2712qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2713{
2714        switch (se_cmd->prot_op) {
2715        case TARGET_PROT_DIN_INSERT:
2716        case TARGET_PROT_DOUT_INSERT:
2717        case TARGET_PROT_DIN_STRIP:
2718        case TARGET_PROT_DOUT_STRIP:
2719        case TARGET_PROT_DIN_PASS:
2720        case TARGET_PROT_DOUT_PASS:
2721            return 1;
2722        default:
2723            return 0;
2724        }
2725        return 0;
2726}
2727
/*
 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
 *
 * Program the DIF reference/application tags and their check masks in the
 * CRC context @ctx according to the command's protection type, and update
 * *pfw_prot_opts with the matching firmware protection options.
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	/* Ref tag is the low 32 bits of the LBA. */
	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
	scsi_qla_host_t *vha = cmd->tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	uint32_t t32 = 0;

	/*
	 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
	 * have been implemented by TCM, before AppTag is avail.
	 * Look for modesense_handlers[]
	 */
	ctx->app_tag = 0;
	ctx->app_tag_mask[0] = 0x0;
	ctx->app_tag_mask[1] = 0x0;

	/* Escape-value handling for controllers that support PI UNINIT. */
	if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	/* Let the fabric module adjust options and report its tag policy. */
	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

	switch (se_cmd->prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * No check for ql2xenablehba_err_chk, as it
		 * would be an I/O error if hba tag generation
		 * is not done.
		 */
		ctx->ref_tag = cpu_to_le32(lba);
		/* enable ALL bytes of the ref tag */
		ctx->ref_tag_mask[0] = 0xff;
		ctx->ref_tag_mask[1] = 0xff;
		ctx->ref_tag_mask[2] = 0xff;
		ctx->ref_tag_mask[3] = 0xff;
		break;
	case TARGET_DIF_TYPE1_PROT:
	    /*
	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
	     * REF tag, and 16 bit app tag.
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    /* Ref tag checking disabled for this command. */
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE2_PROT:
	    /*
	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
	     * tag has to match LBA in CDB + N
	     */
	    ctx->ref_tag = cpu_to_le32(lba);
	    if (!qla_tgt_ref_mask_check(se_cmd) ||
		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
		    /* Ref tag checking disabled for this command. */
		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
		    break;
	    }
	    /* enable ALL bytes of the ref tag */
	    ctx->ref_tag_mask[0] = 0xff;
	    ctx->ref_tag_mask[1] = 0xff;
	    ctx->ref_tag_mask[2] = 0xff;
	    ctx->ref_tag_mask[3] = 0xff;
	    break;
	case TARGET_DIF_TYPE3_PROT:
	    /* For TYPE 3 protection: 16 bit GUARD only */
	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
	    break;
	}
}
2816
/*
 * Build a CTIO CRC_2 (T10-DIF capable) IOCB for @prm->cmd on the request
 * ring, allocate and fill its CRC context from the DMA pool, and walk the
 * data (and, when bundling, protection) scatter-gather lists into DSDs.
 *
 * Returns QLA_SUCCESS, -EAGAIN when no command handle is free, or
 * QLA_FUNCTION_FAILED on allocation/SG-walk failure (caller cleans up).
 *
 * ha->hardware_lock supposed to be held on entry.
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
	uint32_t                *cur_dsd;
	uint32_t                transfer_length = 0;
	uint32_t                data_bytes;
	uint32_t                dif_bytes;
	uint8_t                 bundling = 1;
	uint8_t                 *clr_ptr;
	struct crc_context      *crc_ctx_pkt = NULL;
	struct qla_hw_data      *ha;
	struct ctio_crc2_to_fw  *pkt;
	dma_addr_t              crc_ctx_dma;
	uint16_t                fw_prot_opts = 0;
	struct qla_tgt_cmd      *cmd = prm->cmd;
	struct se_cmd           *se_cmd = &cmd->se_cmd;
	uint32_t h;
	struct atio_from_isp *atio = &prm->cmd->atio;
	struct qla_tc_param     tc;
	uint16_t t16;
	scsi_qla_host_t *vha = cmd->vha;

	ha = vha->hw;

	/* Build the IOCB in place on the request ring. */
	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

	/* HBA-side insert/strip keeps data and PI unbundled. */
	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
		bundling = 0;

	/* Compute dif len and adjust data len to include protection */
	data_bytes = cmd->bufflen;
	dif_bytes  = (data_bytes / cmd->blk_sz) * 8; /* 8 bytes PI per block */

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		transfer_length = data_bytes;
		if (cmd->prot_sg_cnt)
			data_bytes += dif_bytes;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		/* PI travels on the wire: count it in the FC length. */
		transfer_length = data_bytes + dif_bytes;
		break;
	default:
		BUG();
		break;
	}

	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
	}

	/* Map the target-core protection op to a firmware DIF mode. */
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_INSERT:
		fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case TARGET_PROT_DIN_STRIP:
	case TARGET_PROT_DOUT_STRIP:
		fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		fw_prot_opts |= PO_MODE_DIF_PASS;
		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
		break;
	default:/* Normal Request */
		fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	/* ---- PKT ---- */
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	pkt->entry_type  = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vp_idx;

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	/* s_id bytes from the FCP header are copied in reverse order. */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr   = atio->u.isp24.exchange_addr;

	/* silence compile warning */
	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->ox_id  = cpu_to_le16(t16);

	t16 = (atio->u.isp24.attr << 9);
	pkt->flags |= cpu_to_le16(t16);
	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

	/* Set transfer direction */
	/*
	 * NOTE(review): the plain assignments below overwrite the task
	 * attribute bits OR'ed into pkt->flags just above — confirm
	 * whether these should be |= like the later flag updates.
	 */
	if (cmd->dma_data_direction == DMA_TO_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

	pkt->dseg_count = prm->tot_dsds;
	/* Fibre channel byte count */
	pkt->transfer_length = cpu_to_le32(transfer_length);

	/* ----- CRC context -------- */

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = cmd->ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Set handle */
	crc_ctx_pkt->handle = pkt->handle;

	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count =
			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);

	/* Parameter block for the shared SG-walk helpers. */
	memset((uint8_t *)&tc, 0 , sizeof(tc));
	tc.vha = vha;
	tc.blk_sz = cmd->blk_sz;
	tc.bufflen = cmd->bufflen;
	tc.sg = cmd->sg;
	tc.prot_sg = cmd->prot_sg;
	tc.ctx = crc_ctx_pkt;
	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

	/* Walks data segments */
	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

	if (!bundling && prm->prot_seg_cnt) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
			prm->tot_dsds, &tc))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
		goto crc_queuing_error;

	if (bundling && prm->prot_seg_cnt) {
		/* Walks dif segments */
		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
			prm->prot_seg_cnt, &tc))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */
	qpair->req->outstanding_cmds[h] = NULL;

	return QLA_FUNCTION_FAILED;
}
3032
3033/*
3034 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
3035 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3036 */
3037int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3038        uint8_t scsi_status)
3039{
3040        struct scsi_qla_host *vha = cmd->vha;
3041        struct qla_qpair *qpair = cmd->qpair;
3042        struct ctio7_to_24xx *pkt;
3043        struct qla_tgt_prm prm;
3044        uint32_t full_req_cnt = 0;
3045        unsigned long flags = 0;
3046        int res;
3047
3048        if (cmd->sess && cmd->sess->deleted) {
3049                cmd->state = QLA_TGT_STATE_PROCESSED;
3050                if (cmd->sess->logout_completed)
3051                        /* no need to terminate. FW already freed exchange. */
3052                        qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
3053                else
3054                        qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
3055                return 0;
3056        }
3057
3058        ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3059            "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3060            (xmit_type & QLA_TGT_XMIT_STATUS) ?
3061            1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3062            &cmd->se_cmd, qpair->id);
3063
3064        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3065            &full_req_cnt);
3066        if (unlikely(res != 0)) {
3067                return res;
3068        }
3069
3070        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3071
3072        if (xmit_type == QLA_TGT_XMIT_STATUS)
3073                qpair->tgt_counters.core_qla_snd_status++;
3074        else
3075                qpair->tgt_counters.core_qla_que_buf++;
3076
3077        if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3078                /*
3079                 * Either the port is not online or this request was from
3080                 * previous life, just abort the processing.
3081                 */
3082                cmd->state = QLA_TGT_STATE_PROCESSED;
3083                qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
3084                ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3085                        "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3086                        vha->flags.online, qla2x00_reset_active(vha),
3087                        cmd->reset_count, qpair->chip_reset);
3088                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3089                return 0;
3090        }
3091
3092        /* Does F/W have an IOCBs for this request */
3093        res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3094        if (unlikely(res))
3095                goto out_unmap_unlock;
3096
3097        if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3098                res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3099        else
3100                res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3101        if (unlikely(res != 0)) {
3102                qpair->req->cnt += full_req_cnt;
3103                goto out_unmap_unlock;
3104        }
3105
3106        pkt = (struct ctio7_to_24xx *)prm.pkt;
3107
3108        if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3109                pkt->u.status0.flags |=
3110                    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3111                        CTIO7_FLAGS_STATUS_MODE_0);
3112
3113                if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3114                        qlt_load_data_segments(&prm);
3115
3116                if (prm.add_status_pkt == 0) {
3117                        if (xmit_type & QLA_TGT_XMIT_STATUS) {
3118                                pkt->u.status0.scsi_status =
3119                                    cpu_to_le16(prm.rq_result);
3120                                pkt->u.status0.residual =
3121                                    cpu_to_le32(prm.residual);
3122                                pkt->u.status0.flags |= cpu_to_le16(
3123                                    CTIO7_FLAGS_SEND_STATUS);
3124                                if (qlt_need_explicit_conf(cmd, 0)) {
3125                                        pkt->u.status0.flags |=
3126                                            cpu_to_le16(
3127                                                CTIO7_FLAGS_EXPLICIT_CONFORM |
3128                                                CTIO7_FLAGS_CONFORM_REQ);
3129                                }
3130                        }
3131
3132                } else {
3133                        /*
3134                         * We have already made sure that there is sufficient
3135                         * amount of request entries to not drop HW lock in
3136                         * req_pkt().
3137                         */
3138                        struct ctio7_to_24xx *ctio =
3139                                (struct ctio7_to_24xx *)qlt_get_req_pkt(
3140                                    qpair->req);
3141
3142                        ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3143                            "Building additional status packet 0x%p.\n",
3144                            ctio);
3145
3146                        /*
3147                         * T10Dif: ctio_crc2_to_fw overlay ontop of
3148                         * ctio7_to_24xx
3149                         */
3150                        memcpy(ctio, pkt, sizeof(*ctio));
3151                        /* reset back to CTIO7 */
3152                        ctio->entry_count = 1;
3153                        ctio->entry_type = CTIO_TYPE7;
3154                        ctio->dseg_count = 0;
3155                        ctio->u.status1.flags &= ~cpu_to_le16(
3156                            CTIO7_FLAGS_DATA_IN);
3157
3158                        /* Real finish is ctio_m1's finish */
3159                        pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3160                        pkt->u.status0.flags |= cpu_to_le16(
3161                            CTIO7_FLAGS_DONT_RET_CTIO);
3162
3163                        /* qlt_24xx_init_ctio_to_isp will correct
3164                         * all neccessary fields that's part of CTIO7.
3165                         * There should be no residual of CTIO-CRC2 data.
3166                         */
3167                        qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3168                            &prm);
3169                }
3170        } else
3171                qlt_24xx_init_ctio_to_isp(pkt, &prm);
3172
3173
3174        cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3175        cmd->cmd_sent_to_fw = 1;
3176
3177        /* Memory Barrier */
3178        wmb();
3179        if (qpair->reqq_start_iocbs)
3180                qpair->reqq_start_iocbs(qpair);
3181        else
3182                qla2x00_start_iocbs(vha, qpair->req);
3183        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3184
3185        return 0;
3186
3187out_unmap_unlock:
3188        qlt_unmap_sg(vha, cmd);
3189        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3190
3191        return res;
3192}
3193EXPORT_SYMBOL(qlt_xmit_response);
3194
/*
 * qlt_rdy_to_xfer() - ask the initiator to send the write data for @cmd.
 *
 * Maps the command's S/G list, reserves one request-queue entry and posts
 * a CTIO7 with CTIO7_FLAGS_DATA_OUT.  The data-arrival completion is
 * delivered later through tgt_ops->handle_data().
 *
 * Return: 0 on success (also when the request is dropped because the
 * firmware/session is gone), -EAGAIN if DMA mapping failed, or a negative
 * error from the CTIO build path.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags = 0;
	int res = 0;
	struct qla_qpair *qpair = cmd->qpair;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		cmd->state = QLA_TGT_STATE_NEED_DATA;
		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		return 0;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;
	/* T10-DIF commands need the CRC-capable CTIO variant */
	if (cmd->se_cmd.prot_op)
		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
	else
		res = qlt_24xx_build_ctio_pkt(qpair, &prm);

	if (unlikely(res != 0)) {
		/* Return the reserved request entries before bailing out */
		qpair->req->cnt += prm.req_cnt;
		goto out_unlock_free_unmap;
	}

	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);

	/* For DIF ops the CRC2 builder already loaded the segments */
	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
		qlt_load_data_segments(&prm);

	cmd->state = QLA_TGT_STATE_NEED_DATA;
	cmd->cmd_sent_to_fw = 1;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;

out_unlock_free_unmap:
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
3272
3273
3274/*
3275 * it is assumed either hardware_lock or qpair lock is held.
3276 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	struct ctio_crc_from_fw *sts)
{
	uint8_t		*ap = &sts->actual_dif[0];
	uint8_t		*ep = &sts->expected_dif[0];
	uint64_t	lba = cmd->se_cmd.t_task_lba;
	uint8_t scsi_status, sense_key, asc, ascq;
	unsigned long flags;
	struct scsi_qla_host *vha = cmd->vha;

	cmd->trc_flags |= TRC_DIF_ERR;

	/*
	 * Unpack the big-endian guard/app/ref tags the firmware reported
	 * (actual) and what it expected.
	 * NOTE(review): the raw casts assume the actual_dif/expected_dif
	 * arrays are suitably aligned within the IOCB - presumably
	 * guaranteed by the firmware layout; confirm.
	 */
	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));

	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

	scsi_status = sense_key = asc = ascq = 0;

	/*
	 * check appl tag
	 * Note: unlike the ref-tag check below, this branch deliberately
	 * falls through, so a simultaneous ref-tag mismatch takes
	 * precedence (ascq 0x3 overrides 0x2).
	 */
	if (cmd->e_app_tag != cmd->a_app_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_APP;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;	/* ID CRC OR ECC ERROR family */
		ascq = 0x2;	/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
	}

	/* check ref tag - skips the guard check when it fails */
	if (cmd->e_ref_tag != cmd->a_ref_tag) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_REF;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x3;	/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		goto out;
	}

	/* check guard */
	if (cmd->e_guard != cmd->a_guard) {
		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
		    cmd->atio.u.isp24.fcp_hdr.ox_id);

		cmd->dif_err_code = DIF_ERR_GRD;
		scsi_status = SAM_STAT_CHECK_CONDITION;
		sense_key = ABORTED_COMMAND;
		asc = 0x10;
		ascq = 0x1;	/* LOGICAL BLOCK GUARD CHECK FAILED */
	}
out:
	switch (cmd->state) {
	case QLA_TGT_STATE_NEED_DATA:
		/* handle_data will load DIF error code  */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		vha->hw->tgt.tgt_ops->handle_data(cmd);
		break;
	default:
		/* Already-aborted commands are just freed, no response sent */
		spin_lock_irqsave(&cmd->cmd_lock, flags);
		if (cmd->aborted) {
			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
			vha->hw->tgt.tgt_ops->free_cmd(cmd);
			break;
		}
		spin_unlock_irqrestore(&cmd->cmd_lock, flags);

		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
		    ascq);
		/* assume scsi status gets out on the wire.
		 * Will not wait for completion.
		 */
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		break;
	}
}
3376
3377/* If hardware_lock held on entry, might drop it, then reaquire */
3378/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3379static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3380        struct imm_ntfy_from_isp *ntfy)
3381{
3382        struct nack_to_isp *nack;
3383        struct qla_hw_data *ha = vha->hw;
3384        request_t *pkt;
3385        int ret = 0;
3386
3387        ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3388            "Sending TERM ELS CTIO (ha=%p)\n", ha);
3389
3390        pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3391        if (pkt == NULL) {
3392                ql_dbg(ql_dbg_tgt, vha, 0xe080,
3393                    "qla_target(%d): %s failed: unable to allocate "
3394                    "request packet\n", vha->vp_idx, __func__);
3395                return -ENOMEM;
3396        }
3397
3398        pkt->entry_type = NOTIFY_ACK_TYPE;
3399        pkt->entry_count = 1;
3400        pkt->handle = QLA_TGT_SKIP_HANDLE;
3401
3402        nack = (struct nack_to_isp *)pkt;
3403        nack->ox_id = ntfy->ox_id;
3404
3405        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3406        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3407                nack->u.isp24.flags = ntfy->u.isp24.flags &
3408                        __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3409        }
3410
3411        /* terminate */
3412        nack->u.isp24.flags |=
3413                __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3414
3415        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3416        nack->u.isp24.status = ntfy->u.isp24.status;
3417        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3418        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3419        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3420        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3421        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3422        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3423
3424        qla2x00_start_iocbs(vha, vha->req);
3425        return ret;
3426}
3427
3428static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3429        struct imm_ntfy_from_isp *imm, int ha_locked)
3430{
3431        unsigned long flags = 0;
3432        int rc;
3433
3434        if (ha_locked) {
3435                rc = __qlt_send_term_imm_notif(vha, imm);
3436
3437#if 0   /* Todo  */
3438                if (rc == -ENOMEM)
3439                        qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3440#else
3441                if (rc) {
3442                }
3443#endif
3444                goto done;
3445        }
3446
3447        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3448        rc = __qlt_send_term_imm_notif(vha, imm);
3449
3450#if 0   /* Todo */
3451        if (rc == -ENOMEM)
3452                qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3453#endif
3454
3455done:
3456        if (!ha_locked)
3457                spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3458}
3459
3460/*
3461 * If hardware_lock held on entry, might drop it, then reaquire
3462 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3463 */
3464static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3465        struct qla_tgt_cmd *cmd,
3466        struct atio_from_isp *atio)
3467{
3468        struct scsi_qla_host *vha = qpair->vha;
3469        struct ctio7_to_24xx *ctio24;
3470        struct qla_hw_data *ha = vha->hw;
3471        request_t *pkt;
3472        int ret = 0;
3473        uint16_t temp;
3474
3475        ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3476
3477        if (cmd)
3478                vha = cmd->vha;
3479
3480        pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3481        if (pkt == NULL) {
3482                ql_dbg(ql_dbg_tgt, vha, 0xe050,
3483                    "qla_target(%d): %s failed: unable to allocate "
3484                    "request packet\n", vha->vp_idx, __func__);
3485                return -ENOMEM;
3486        }
3487
3488        if (cmd != NULL) {
3489                if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3490                        ql_dbg(ql_dbg_tgt, vha, 0xe051,
3491                            "qla_target(%d): Terminating cmd %p with "
3492                            "incorrect state %d\n", vha->vp_idx, cmd,
3493                            cmd->state);
3494                } else
3495                        ret = 1;
3496        }
3497
3498        qpair->tgt_counters.num_term_xchg_sent++;
3499        pkt->entry_count = 1;
3500        pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3501
3502        ctio24 = (struct ctio7_to_24xx *)pkt;
3503        ctio24->entry_type = CTIO_TYPE7;
3504        ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3505        ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3506        ctio24->vp_index = vha->vp_idx;
3507        ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3508        ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3509        ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3510        ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3511        temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3512                CTIO7_FLAGS_TERMINATE;
3513        ctio24->u.status1.flags = cpu_to_le16(temp);
3514        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3515        ctio24->u.status1.ox_id = cpu_to_le16(temp);
3516
3517        /* Most likely, it isn't needed */
3518        ctio24->u.status1.residual = get_unaligned((uint32_t *)
3519            &atio->u.isp24.fcp_cmnd.add_cdb[
3520            atio->u.isp24.fcp_cmnd.add_cdb_len]);
3521        if (ctio24->u.status1.residual != 0)
3522                ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3523
3524        /* Memory Barrier */
3525        wmb();
3526        if (qpair->reqq_start_iocbs)
3527                qpair->reqq_start_iocbs(qpair);
3528        else
3529                qla2x00_start_iocbs(vha, qpair->req);
3530        return ret;
3531}
3532
3533static void qlt_send_term_exchange(struct qla_qpair *qpair,
3534        struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3535        int ul_abort)
3536{
3537        struct scsi_qla_host *vha;
3538        unsigned long flags = 0;
3539        int rc;
3540
3541        /* why use different vha? NPIV */
3542        if (cmd)
3543                vha = cmd->vha;
3544        else
3545                vha = qpair->vha;
3546
3547        if (ha_locked) {
3548                rc = __qlt_send_term_exchange(qpair, cmd, atio);
3549                if (rc == -ENOMEM)
3550                        qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3551                goto done;
3552        }
3553        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3554        rc = __qlt_send_term_exchange(qpair, cmd, atio);
3555        if (rc == -ENOMEM)
3556                qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3557
3558done:
3559        if (cmd && !ul_abort && !cmd->aborted) {
3560                if (cmd->sg_mapped)
3561                        qlt_unmap_sg(vha, cmd);
3562                vha->hw->tgt.tgt_ops->free_cmd(cmd);
3563        }
3564
3565        if (!ha_locked)
3566                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3567
3568        return;
3569}
3570
3571static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3572{
3573        struct list_head free_list;
3574        struct qla_tgt_cmd *cmd, *tcmd;
3575
3576        vha->hw->tgt.leak_exchg_thresh_hold =
3577            (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3578
3579        cmd = tcmd = NULL;
3580        if (!list_empty(&vha->hw->tgt.q_full_list)) {
3581                INIT_LIST_HEAD(&free_list);
3582                list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3583
3584                list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3585                        list_del(&cmd->cmd_list);
3586                        /* This cmd was never sent to TCM.  There is no need
3587                         * to schedule free or call free_cmd
3588                         */
3589                        qlt_free_cmd(cmd);
3590                        vha->hw->tgt.num_qfull_cmds_alloc--;
3591                }
3592        }
3593        vha->hw->tgt.num_qfull_cmds_dropped = 0;
3594}
3595
3596static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3597{
3598        uint32_t total_leaked;
3599
3600        total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3601
3602        if (vha->hw->tgt.leak_exchg_thresh_hold &&
3603            (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3604
3605                ql_dbg(ql_dbg_tgt, vha, 0xe079,
3606                    "Chip reset due to exchange starvation: %d/%d.\n",
3607                    total_leaked, vha->hw->cur_fw_xcb_count);
3608
3609                if (IS_P3P_TYPE(vha->hw))
3610                        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3611                else
3612                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3613                qla2xxx_wake_dpc(vha);
3614        }
3615
3616}
3617
3618int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3619{
3620        struct qla_tgt *tgt = cmd->tgt;
3621        struct scsi_qla_host *vha = tgt->vha;
3622        struct se_cmd *se_cmd = &cmd->se_cmd;
3623        unsigned long flags;
3624
3625        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3626            "qla_target(%d): terminating exchange for aborted cmd=%p "
3627            "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3628            se_cmd->tag);
3629
3630        spin_lock_irqsave(&cmd->cmd_lock, flags);
3631        if (cmd->aborted) {
3632                spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3633                /*
3634                 * It's normal to see 2 calls in this path:
3635                 *  1) XFER Rdy completion + CMD_T_ABORT
3636                 *  2) TCM TMR - drain_state_list
3637                 */
3638                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3639                    "multiple abort. %p transport_state %x, t_state %x, "
3640                    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3641                    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3642                return EIO;
3643        }
3644        cmd->aborted = 1;
3645        cmd->trc_flags |= TRC_ABORT;
3646        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3647
3648        qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3649        return 0;
3650}
3651EXPORT_SYMBOL(qlt_abort_cmd);
3652
3653void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3654{
3655        struct fc_port *sess = cmd->sess;
3656
3657        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3658            "%s: se_cmd[%p] ox_id %04x\n",
3659            __func__, &cmd->se_cmd,
3660            be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3661
3662        BUG_ON(cmd->cmd_in_wq);
3663
3664        if (cmd->sg_mapped)
3665                qlt_unmap_sg(cmd->vha, cmd);
3666
3667        if (!cmd->q_full)
3668                qlt_decr_num_pend_cmds(cmd->vha);
3669
3670        BUG_ON(cmd->sg_mapped);
3671        cmd->jiffies_at_free = get_jiffies_64();
3672        if (unlikely(cmd->free_sg))
3673                kfree(cmd->sg);
3674
3675        if (!sess || !sess->se_sess) {
3676                WARN_ON(1);
3677                return;
3678        }
3679        cmd->jiffies_at_free = get_jiffies_64();
3680        percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3681}
3682EXPORT_SYMBOL(qlt_free_cmd);
3683
3684/*
3685 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3686 */
3687static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3688        struct qla_tgt_cmd *cmd, uint32_t status)
3689{
3690        int term = 0;
3691        struct scsi_qla_host *vha = qpair->vha;
3692
3693        if (cmd->se_cmd.prot_op)
3694                ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3695                    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3696                    "se_cmd=%p tag[%x] op %#x/%s",
3697                     cmd->lba, cmd->lba,
3698                     cmd->num_blks, &cmd->se_cmd,
3699                     cmd->atio.u.isp24.exchange_addr,
3700                     cmd->se_cmd.prot_op,
3701                     prot_op_str(cmd->se_cmd.prot_op));
3702
3703        if (ctio != NULL) {
3704                struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3705                term = !(c->flags &
3706                    cpu_to_le16(OF_TERM_EXCH));
3707        } else
3708                term = 1;
3709
3710        if (term)
3711                qlt_term_ctio_exchange(qpair, ctio, cmd, status);
3712
3713        return term;
3714}
3715
3716
3717/* ha->hardware_lock supposed to be held on entry */
3718static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3719        struct rsp_que *rsp, uint32_t handle, void *ctio)
3720{
3721        struct qla_tgt_cmd *cmd = NULL;
3722        struct req_que *req;
3723        int qid = GET_QID(handle);
3724        uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
3725
3726        if (unlikely(h == QLA_TGT_SKIP_HANDLE))
3727                return NULL;
3728
3729        if (qid == rsp->req->id) {
3730                req = rsp->req;
3731        } else if (vha->hw->req_q_map[qid]) {
3732                ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3733                    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
3734                    vha->vp_idx, rsp->id, handle);
3735                req = vha->hw->req_q_map[qid];
3736        } else {
3737                return NULL;
3738        }
3739
3740        h &= QLA_CMD_HANDLE_MASK;
3741
3742        if (h != QLA_TGT_NULL_HANDLE) {
3743                if (unlikely(h >= req->num_outstanding_cmds)) {
3744                        ql_dbg(ql_dbg_tgt, vha, 0xe052,
3745                            "qla_target(%d): Wrong handle %x received\n",
3746                            vha->vp_idx, handle);
3747                        return NULL;
3748                }
3749
3750                cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
3751                if (unlikely(cmd == NULL)) {
3752                        ql_dbg(ql_dbg_async, vha, 0xe053,
3753                            "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
3754                                vha->vp_idx, handle, req->id, rsp->id);
3755                        return NULL;
3756                }
3757                req->outstanding_cmds[h] = NULL;
3758        } else if (ctio != NULL) {
3759                /* We can't get loop ID from CTIO7 */
3760                ql_dbg(ql_dbg_tgt, vha, 0xe054,
3761                    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3762                    "support NULL handles\n", vha->vp_idx);
3763                return NULL;
3764        }
3765
3766        return cmd;
3767}
3768
3769/* hardware_lock should be held by caller. */
3770void
3771qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3772{
3773        struct qla_hw_data *ha = vha->hw;
3774
3775        if (cmd->sg_mapped)
3776                qlt_unmap_sg(vha, cmd);
3777
3778        /* TODO: fix debug message type and ids. */
3779        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3780                ql_dbg(ql_dbg_io, vha, 0xff00,
3781                    "HOST-ABORT: state=PROCESSED.\n");
3782        } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3783                cmd->write_data_transferred = 0;
3784                cmd->state = QLA_TGT_STATE_DATA_IN;
3785
3786                ql_dbg(ql_dbg_io, vha, 0xff01,
3787                    "HOST-ABORT: state=DATA_IN.\n");
3788
3789                ha->tgt.tgt_ops->handle_data(cmd);
3790                return;
3791        } else {
3792                ql_dbg(ql_dbg_io, vha, 0xff03,
3793                    "HOST-ABORT: state=BAD(%d).\n",
3794                    cmd->state);
3795                dump_stack();
3796        }
3797
3798        cmd->trc_flags |= TRC_FLUSH;
3799        ha->tgt.tgt_ops->free_cmd(cmd);
3800}
3801
3802/*
3803 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3804 */
/*
 * qlt_do_ctio_completion() - dispatch a CTIO completion to the right
 * command-state handler.  Looks the command up by @handle, classifies
 * the completion @status, and either hands the command back to TCM
 * (handle_data) or frees it.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct qla_tgt_cmd *cmd;
	struct qla_qpair *qpair = rsp->qpair;

	/* Intermediate CTIOs carry no command; only log on error */
	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	cmd->cmd_sent_to_fw = 0;

	qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
			/* driver request abort via Terminate exchange */
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
		{
			int logged_out =
				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with %s status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
			    status, cmd->state, se_cmd);

			if (logged_out && cmd->sess) {
				/*
				 * Session is already logged out, but we need
				 * to notify initiator, who's not aware of this
				 */
				cmd->sess->logout_on_delete = 0;
				cmd->sess->send_els_logo = 1;
				ql_dbg(ql_dbg_disc, vha, 0x20f8,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, cmd->sess->port_name);

				qlt_schedule_sess_for_deletion_lock(cmd->sess);
			}
			break;
		}
		case CTIO_DIF_ERROR: {
			struct ctio_crc_from_fw *crc =
				(struct ctio_crc_from_fw *)ctio;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
			    "qla_target(%d): CTIO with DIF_ERROR status %x "
			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
			    "expect_dif[0x%llx]\n",
			    vha->vp_idx, status, cmd->state, se_cmd,
			    *((u64 *)&crc->actual_dif[0]),
			    *((u64 *)&crc->expected_dif[0]));

			/* DIF handler takes over the command entirely */
			qlt_handle_dif_error(qpair, cmd, ctio);
			return;
		}
		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}


		/* "cmd->aborted" means
		 * cmd is already aborted/terminated, we don't
		 * need to terminate again.  The exchange is already
		 * cleaned up/freed at FW level.  Just cleanup at driver
		 * level.
		 */
		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
		    (!cmd->aborted)) {
			cmd->trc_flags |= TRC_CTIO_ERR;
			/* If a terminate was sent, its completion frees cmd */
			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
				return;
		}
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		cmd->trc_flags |= TRC_CTIO_DONE;
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/* Write-data arrival: hand the command back to TCM */
		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (status == CTIO_SUCCESS)
			cmd->write_data_transferred = 1;

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->aborted) {
		cmd->trc_flags |= TRC_CTIO_ABORTED;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
	} else {
		cmd->trc_flags |= TRC_CTIO_STRANGE;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS) &&
		!cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
3941
3942static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3943        uint8_t task_codes)
3944{
3945        int fcp_task_attr;
3946
3947        switch (task_codes) {
3948        case ATIO_SIMPLE_QUEUE:
3949                fcp_task_attr = TCM_SIMPLE_TAG;
3950                break;
3951        case ATIO_HEAD_OF_QUEUE:
3952                fcp_task_attr = TCM_HEAD_TAG;
3953                break;
3954        case ATIO_ORDERED_QUEUE:
3955                fcp_task_attr = TCM_ORDERED_TAG;
3956                break;
3957        case ATIO_ACA_QUEUE:
3958                fcp_task_attr = TCM_ACA_TAG;
3959                break;
3960        case ATIO_UNTAGGED:
3961                fcp_task_attr = TCM_SIMPLE_TAG;
3962                break;
3963        default:
3964                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3965                    "qla_target: unknown task code %x, use ORDERED instead\n",
3966                    task_codes);
3967                fcp_task_attr = TCM_ORDERED_TAG;
3968                break;
3969        }
3970
3971        return fcp_task_attr;
3972}
3973
3974static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
3975                                        uint8_t *);
3976/*
3977 * Process context for I/O path into tcm_qla2xxx code
3978 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = cmd->sess;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;
	struct qla_qpair *qpair = cmd->qpair;

	/* No longer sitting on qla_tgt_wq */
	cmd->cmd_in_wq = 0;
	cmd->trc_flags |= TRC_DO_WORK;

	/* Command was marked aborted while it was queued: terminate it */
	if (cmd->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
		    "cmd with tag %u is aborted\n",
		    cmd->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	spin_lock_init(&cmd->cmd_lock);
	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;

	/* Derive DMA direction from the FCP_CMND rddata/wrdata bits */
	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	/*
	 * The big-endian data length sits immediately after the
	 * additional CDB bytes; it may be unaligned, hence get_unaligned().
	 */
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	/* Hand the command off to the tcm_qla2xxx fabric layer */
	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
					  fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop extra session reference from qlt_handle_cmd_for_atio().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not sent to target yet, so pass NULL as the second
	 * argument to qlt_send_term_exchange() and free the memory here.
	 */
	cmd->trc_flags |= TRC_DO_WORK_ERR;
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

	/* Undo the accounting done at qlt_get_tag() time */
	qlt_decr_num_pend_cmds(vha);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* Drop the extra session reference taken for this command */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
4052
4053static void qlt_do_work(struct work_struct *work)
4054{
4055        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4056        scsi_qla_host_t *vha = cmd->vha;
4057        unsigned long flags;
4058
4059        spin_lock_irqsave(&vha->cmd_list_lock, flags);
4060        list_del(&cmd->cmd_list);
4061        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4062
4063        __qlt_do_work(cmd);
4064}
4065
4066void qlt_clr_qp_table(struct scsi_qla_host *vha)
4067{
4068        unsigned long flags;
4069        struct qla_hw_data *ha = vha->hw;
4070        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4071        void *node;
4072        u64 key = 0;
4073
4074        ql_log(ql_log_info, vha, 0x706c,
4075            "User update Number of Active Qpairs %d\n",
4076            ha->tgt.num_act_qpairs);
4077
4078        spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4079
4080        btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4081                btree_remove64(&tgt->lun_qpair_map, key);
4082
4083        ha->base_qpair->lun_cnt = 0;
4084        for (key = 0; key < ha->max_qpairs; key++)
4085                if (ha->queue_pair_map[key])
4086                        ha->queue_pair_map[key]->lun_cnt = 0;
4087
4088        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4089}
4090
/*
 * Pick the queue pair (and CPU hint) that will service this command.
 * The lun -> qpair choice is cached in tgt->lun_qpair_map so subsequent
 * commands for the same lun land on the same qpair.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_qpair *qpair, *qp;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	if (vha->flags.qpairs_available) {
		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
		if (unlikely(!h)) {
			/* spread lun to qpair ratio evenly */
			int lcnt = 0, rc;
			struct scsi_qla_host *base_vha =
				pci_get_drvdata(vha->hw->pdev);

			/* First choice: the base qpair, if not used yet */
			qpair = vha->hw->base_qpair;
			if (qpair->lun_cnt == 0) {
				qpair->lun_cnt++;
				h = qla_qpair_to_hint(tgt, qpair);
				BUG_ON(!h);
				rc = btree_insert64(&tgt->lun_qpair_map,
					cmd->unpacked_lun, h, GFP_ATOMIC);
				if (rc) {
					/* Not cached; next command retries */
					qpair->lun_cnt--;
					ql_log(ql_log_info, vha, 0xd037,
					    "Unable to insert lun %llx into lun_qpair_map\n",
					    cmd->unpacked_lun);
				}
				goto out;
			} else {
				lcnt = qpair->lun_cnt;
			}

			/* Next: any unused qpair; track the least-loaded one */
			h = NULL;
			list_for_each_entry(qp, &base_vha->qp_list,
			    qp_list_elem) {
				if (qp->lun_cnt == 0) {
					qp->lun_cnt++;
					h = qla_qpair_to_hint(tgt, qp);
					BUG_ON(!h);
					rc = btree_insert64(&tgt->lun_qpair_map,
					    cmd->unpacked_lun, h, GFP_ATOMIC);
					if (rc) {
						qp->lun_cnt--;
						ql_log(ql_log_info, vha, 0xd038,
							"Unable to insert lun %llx into lun_qpair_map\n",
							cmd->unpacked_lun);
					}
					qpair = qp;
					goto out;
				} else {
					if (qp->lun_cnt < lcnt) {
						lcnt = qp->lun_cnt;
						qpair = qp;
						continue;
					}
				}
			}
			/* All qpairs in use: take the least-loaded candidate */
			BUG_ON(!qpair);
			qpair->lun_cnt++;
			h = qla_qpair_to_hint(tgt, qpair);
			BUG_ON(!h);
			rc = btree_insert64(&tgt->lun_qpair_map,
				cmd->unpacked_lun, h, GFP_ATOMIC);
			if (rc) {
				qpair->lun_cnt--;
				ql_log(ql_log_info, vha, 0xd039,
				   "Unable to insert lun %llx into lun_qpair_map\n",
				   cmd->unpacked_lun);
			}
		}
	} else {
		/* Single-queue mode: everything goes through hint 0 */
		h = &tgt->qphints[0];
	}
out:
	cmd->qpair = h->qpair;
	cmd->se_cmd.cpuid = h->cpuid;
}
4169
4170static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4171                                       struct fc_port *sess,
4172                                       struct atio_from_isp *atio)
4173{
4174        struct se_session *se_sess = sess->se_sess;
4175        struct qla_tgt_cmd *cmd;
4176        int tag;
4177
4178        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
4179        if (tag < 0)
4180                return NULL;
4181
4182        cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4183        memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4184        cmd->cmd_type = TYPE_TGT_CMD;
4185        memcpy(&cmd->atio, atio, sizeof(*atio));
4186        cmd->state = QLA_TGT_STATE_NEW;
4187        cmd->tgt = vha->vha_tgt.qla_tgt;
4188        qlt_incr_num_pend_cmds(vha);
4189        cmd->vha = vha;
4190        cmd->se_cmd.map_tag = tag;
4191        cmd->sess = sess;
4192        cmd->loop_id = sess->loop_id;
4193        cmd->conf_compl_supported = sess->conf_compl_supported;
4194
4195        cmd->trc_flags = 0;
4196        cmd->jiffies_at_alloc = get_jiffies_64();
4197
4198        cmd->unpacked_lun = scsilun_to_int(
4199            (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4200        qlt_assign_qpair(vha, cmd);
4201        cmd->reset_count = vha->hw->base_qpair->chip_reset;
4202        cmd->vp_idx = vha->vp_idx;
4203
4204        return cmd;
4205}
4206
/*
 * Deferred-work path for an ATIO that arrived with no matching session:
 * create the session synchronously, then dispatch the command via
 * __qlt_do_work() in this process context.
 */
static void qlt_create_sess_from_atio(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
					struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

	/* Remove from the pending sess-op list before doing anything else */
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_del(&op->cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	/* Aborted (e.g. by abort_cmds_for_s_id()) while queued */
	if (op->aborted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
		    "sess_op with tag %u is aborted\n",
		    op->atio.u.isp24.exchange_addr);
		goto out_term;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
	    "qla_target(%d): Unable to find wwn login"
	    " (s_id %x:%x:%x), trying to create it manually\n",
	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

	/* Multi-entry ATIOs are not supported on this path */
	if (op->atio.u.raw.entry_count > 1) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
		    "Dropping multy entry atio %p\n", &op->atio);
		goto out_term;
	}

	sess = qlt_make_local_sess(vha, s_id);
	/* sess has an extra creation ref. */

	if (!sess)
		goto out_term;
	/*
	 * Now obtain a pre-allocated session tag using the original op->atio
	 * packet header, and dispatch into __qlt_do_work() using the existing
	 * process context.
	 */
	cmd = qlt_get_tag(vha, sess, &op->atio);
	if (!cmd) {
		/* Tag pool exhausted: report BUSY and drop the session ref */
		struct qla_qpair *qpair = ha->base_qpair;

		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
		qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		kfree(op);
		return;
	}

	/*
	 * __qlt_do_work() will call qlt_put_sess() to release
	 * the extra reference taken above by qlt_make_local_sess()
	 */
	__qlt_do_work(cmd);
	kfree(op);
	return;
out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
	kfree(op);
}
4276
4277/* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
/*
 * Entry point for a new SCSI command ATIO: find (or schedule creation of)
 * the session, allocate a command descriptor, and queue it to qla_tgt_wq.
 * Returns 0 on success or a negative errno; on error the caller owns
 * termination of the exchange.
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess)) {
		/*
		 * No session yet for this s_id: defer to
		 * qlt_create_sess_from_atio() in process context.
		 */
		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
						     GFP_ATOMIC);
		if (!op)
			return -ENOMEM;

		memcpy(&op->atio, atio, sizeof(*atio));
		op->vha = vha;

		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

		INIT_WORK(&op->work, qlt_create_sess_from_atio);
		queue_work(qla_tgt_wq, &op->work);
		return 0;
	}

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x \n",
		    __func__, sess->port_name,
		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		/* Drop the reference taken above before bailing out */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		ha->tgt.tgt_ops->put_sess(sess);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return -ENOMEM;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	/* Track the command so it can be aborted while still queued */
	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		/* Run on the CPU chosen by qlt_assign_qpair() */
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work_on(smp_processor_id(), qla_tgt_wq,
			    &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
4365
4366/* ha->hardware_lock supposed to be held on entry */
4367static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4368        int fn, void *iocb, int flags)
4369{
4370        struct scsi_qla_host *vha = sess->vha;
4371        struct qla_hw_data *ha = vha->hw;
4372        struct qla_tgt_mgmt_cmd *mcmd;
4373        struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4374        int res;
4375
4376        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4377        if (!mcmd) {
4378                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4379                    "qla_target(%d): Allocation of management "
4380                    "command failed, some commands and their data could "
4381                    "leak\n", vha->vp_idx);
4382                return -ENOMEM;
4383        }
4384        memset(mcmd, 0, sizeof(*mcmd));
4385        mcmd->sess = sess;
4386
4387        if (iocb) {
4388                memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4389                    sizeof(mcmd->orig_iocb.imm_ntfy));
4390        }
4391        mcmd->tmr_func = fn;
4392        mcmd->flags = flags;
4393        mcmd->reset_count = ha->base_qpair->chip_reset;
4394        mcmd->qpair = ha->base_qpair;
4395        mcmd->vha = vha;
4396
4397        switch (fn) {
4398        case QLA_TGT_LUN_RESET:
4399            abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4400            break;
4401        }
4402
4403        res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
4404        if (res != 0) {
4405                ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
4406                    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
4407                    sess->vha->vp_idx, res);
4408                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4409                return -EFAULT;
4410        }
4411
4412        return 0;
4413}
4414
4415/* ha->hardware_lock supposed to be held on entry */
4416static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4417{
4418        struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4419        struct qla_hw_data *ha = vha->hw;
4420        struct qla_tgt *tgt;
4421        struct fc_port *sess;
4422        u64 unpacked_lun;
4423        int fn;
4424        unsigned long flags;
4425
4426        tgt = vha->vha_tgt.qla_tgt;
4427
4428        fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4429
4430        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4431        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4432            a->u.isp24.fcp_hdr.s_id);
4433        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4434
4435        unpacked_lun =
4436            scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4437
4438        if (!sess) {
4439                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
4440                    "qla_target(%d): task mgmt fn 0x%x for "
4441                    "non-existant session\n", vha->vp_idx, fn);
4442                return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
4443                    sizeof(struct atio_from_isp));
4444        }
4445
4446        if (sess->deleted)
4447                return -EFAULT;
4448
4449        return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4450}
4451
4452/* ha->hardware_lock supposed to be held on entry */
4453static int __qlt_abort_task(struct scsi_qla_host *vha,
4454        struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4455{
4456        struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4457        struct qla_hw_data *ha = vha->hw;
4458        struct qla_tgt_mgmt_cmd *mcmd;
4459        u64 unpacked_lun;
4460        int rc;
4461
4462        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4463        if (mcmd == NULL) {
4464                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4465                    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4466                    vha->vp_idx, __func__);
4467                return -ENOMEM;
4468        }
4469        memset(mcmd, 0, sizeof(*mcmd));
4470
4471        mcmd->sess = sess;
4472        memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4473            sizeof(mcmd->orig_iocb.imm_ntfy));
4474
4475        unpacked_lun =
4476            scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4477        mcmd->reset_count = ha->base_qpair->chip_reset;
4478        mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4479        mcmd->qpair = ha->base_qpair;
4480
4481        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4482            le16_to_cpu(iocb->u.isp2x.seq_id));
4483        if (rc != 0) {
4484                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4485                    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4486                    vha->vp_idx, rc);
4487                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4488                return -EFAULT;
4489        }
4490
4491        return 0;
4492}
4493
4494/* ha->hardware_lock supposed to be held on entry */
4495static int qlt_abort_task(struct scsi_qla_host *vha,
4496        struct imm_ntfy_from_isp *iocb)
4497{
4498        struct qla_hw_data *ha = vha->hw;
4499        struct fc_port *sess;
4500        int loop_id;
4501        unsigned long flags;
4502
4503        loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4504
4505        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4506        sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4507        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4508
4509        if (sess == NULL) {
4510                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4511                    "qla_target(%d): task abort for unexisting "
4512                    "session\n", vha->vp_idx);
4513                return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4514                    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4515        }
4516
4517        return __qlt_abort_task(vha, iocb, sess);
4518}
4519
4520void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4521{
4522        if (rc != MBS_COMMAND_COMPLETE) {
4523                ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4524                        "%s: se_sess %p / sess %p from"
4525                        " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4526                        " LOGO failed: %#x\n",
4527                        __func__,
4528                        fcport->se_sess,
4529                        fcport,
4530                        fcport->port_name, fcport->loop_id,
4531                        fcport->d_id.b.domain, fcport->d_id.b.area,
4532                        fcport->d_id.b.al_pa, rc);
4533        }
4534
4535        fcport->logout_completed = 1;
4536}
4537
4538/*
4539* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4540*
4541* Schedules sessions with matching port_id/loop_id but different wwn for
4542* deletion. Returns existing session with matching wwn if present.
4543* Null otherwise.
4544*/
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	/* Walk every fcport; exactly one may match our wwn */
	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			/* More than one session with the same wwn is a bug */
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but another
				 * session that has the same s_id/loop_id combo
				 * might have cleared it when requested this session
				 * deletion, so don't touch it
				 */
				qlt_schedule_sess_for_deletion(other_sess, true);
			} else {
				/*
				 * Another wwn used to have our s_id/loop_id
				 * kill the session, but don't free the loop_id
				 */
				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);


				other_sess->keep_nport_handle = 1;
				*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess,
				    true);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
			(loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
			       "Invalidating sess %p loop_id %d wwn %llx.\n",
			       other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id
			 * Ok to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess, true);
		}
	}

	return sess;
}
4611
4612/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4613static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4614{
4615        struct qla_tgt_sess_op *op;
4616        struct qla_tgt_cmd *cmd;
4617        uint32_t key;
4618        int count = 0;
4619        unsigned long flags;
4620
4621        key = (((u32)s_id->b.domain << 16) |
4622               ((u32)s_id->b.area   <<  8) |
4623               ((u32)s_id->b.al_pa));
4624
4625        spin_lock_irqsave(&vha->cmd_list_lock, flags);
4626        list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4627                uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4628
4629                if (op_key == key) {
4630                        op->aborted = true;
4631                        count++;
4632                }
4633        }
4634
4635        list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4636                uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4637                if (op_key == key) {
4638                        op->aborted = true;
4639                        count++;
4640                }
4641        }
4642
4643        list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4644                uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4645                if (cmd_key == key) {
4646                        cmd->aborted = 1;
4647                        count++;
4648                }
4649        }
4650        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4651
4652        return count;
4653}
4654
4655/*
4656 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4657 */
4658static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4659        struct imm_ntfy_from_isp *iocb)
4660{
4661        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4662        struct qla_hw_data *ha = vha->hw;
4663        struct fc_port *sess = NULL, *conflict_sess = NULL;
4664        uint64_t wwn;
4665        port_id_t port_id;
4666        uint16_t loop_id;
4667        uint16_t wd3_lo;
4668        int res = 0;
4669        struct qlt_plogi_ack_t *pla;
4670        unsigned long flags;
4671
4672        wwn = wwn_to_u64(iocb->u.isp24.port_name);
4673
4674        port_id.b.domain = iocb->u.isp24.port_id[2];
4675        port_id.b.area   = iocb->u.isp24.port_id[1];
4676        port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4677        port_id.b.rsvd_1 = 0;
4678
4679        loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4680
4681        ql_dbg(ql_dbg_disc, vha, 0xf026,
4682            "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4683            vha->vp_idx, iocb->u.isp24.port_id[2],
4684                iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4685                   iocb->u.isp24.status_subcode, loop_id,
4686                iocb->u.isp24.port_name);
4687
4688        /* res = 1 means ack at the end of thread
4689         * res = 0 means ack async/later.
4690         */
4691        switch (iocb->u.isp24.status_subcode) {
4692        case ELS_PLOGI:
4693
4694                /* Mark all stale commands in qla_tgt_wq for deletion */
4695                abort_cmds_for_s_id(vha, &port_id);
4696
4697                if (wwn) {
4698                        spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4699                        sess = qlt_find_sess_invalidate_other(vha, wwn,
4700                                port_id, loop_id, &conflict_sess);
4701                        spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4702                }
4703
4704                if (IS_SW_RESV_ADDR(port_id)) {
4705                        res = 1;
4706                        break;
4707                }
4708
4709                pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4710                if (!pla) {
4711                        qlt_send_term_imm_notif(vha, iocb, 1);
4712                        break;
4713                }
4714
4715                res = 0;
4716
4717                if (conflict_sess) {
4718                        conflict_sess->login_gen++;
4719                        qlt_plogi_ack_link(vha, pla, conflict_sess,
4720                                QLT_PLOGI_LINK_CONFLICT);
4721                }
4722
4723                if (!sess) {
4724                        pla->ref_count++;
4725                        qla24xx_post_newsess_work(vha, &port_id,
4726                                iocb->u.isp24.port_name, pla);
4727                        res = 0;
4728                        break;
4729                }
4730
4731                qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4732                sess->fw_login_state = DSC_LS_PLOGI_PEND;
4733                sess->d_id = port_id;
4734                sess->login_gen++;
4735
4736                switch (sess->disc_state) {
4737                case DSC_DELETED:
4738                        qlt_plogi_ack_unref(vha, pla);
4739                        break;
4740
4741                default:
4742                        /*
4743                         * Under normal circumstances we want to release nport handle
4744                         * during LOGO process to avoid nport handle leaks inside FW.
4745                         * The exception is when LOGO is done while another PLOGI with
4746                         * the same nport handle is waiting as might be the case here.
4747                         * Note: there is always a possibily of a race where session
4748                         * deletion has already started for other reasons (e.g. ACL
4749                         * removal) and now PLOGI arrives:
4750                         * 1. if PLOGI arrived in FW after nport handle has been freed,
4751                         *    FW must have assigned this PLOGI a new/same handle and we
4752                         *    can proceed ACK'ing it as usual when session deletion
4753                         *    completes.
4754                         * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4755                         *    bit reached it, the handle has now been released. We'll
4756                         *    get an error when we ACK this PLOGI. Nothing will be sent
4757                         *    back to initiator. Initiator should eventually retry
4758                         *    PLOGI and situation will correct itself.
4759                         */
4760                        sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4761                           (sess->d_id.b24 == port_id.b24));
4762
4763                        ql_dbg(ql_dbg_disc, vha, 0x20f9,
4764                            "%s %d %8phC post del sess\n",
4765                            __func__, __LINE__, sess->port_name);
4766
4767
4768                        qlt_schedule_sess_for_deletion_lock(sess);
4769                        break;
4770                }
4771
4772                break;
4773
4774        case ELS_PRLI:
4775                wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4776
4777                if (wwn) {
4778                        spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4779                        sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4780                                loop_id, &conflict_sess);
4781                        spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4782                }
4783
4784                if (conflict_sess) {
4785                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4786                            "PRLI with conflicting sess %p port %8phC\n",
4787                            conflict_sess, conflict_sess->port_name);
4788                        qlt_send_term_imm_notif(vha, iocb, 1);
4789                        res = 0;
4790                        break;
4791                }
4792
4793                if (sess != NULL) {
4794                        if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
4795                            sess->fw_login_state != DSC_LS_PLOGI_COMP) {
4796                                /*
4797                                 * Impatient initiator sent PRLI before last
4798                                 * PLOGI could finish. Will force him to re-try,
4799                                 * while last one finishes.
4800                                 */
4801                                ql_log(ql_log_warn, sess->vha, 0xf095,
4802                                    "sess %p PRLI received, before plogi ack.\n",
4803                                    sess);
4804                                qlt_send_term_imm_notif(vha, iocb, 1);
4805                                res = 0;
4806                                break;
4807                        }
4808
4809                        /*
4810                         * This shouldn't happen under normal circumstances,
4811                         * since we have deleted the old session during PLOGI
4812                         */
4813                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4814                            "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4815                            sess->loop_id, sess, iocb->u.isp24.nport_handle);
4816
4817                        sess->local = 0;
4818                        sess->loop_id = loop_id;
4819                        sess->d_id = port_id;
4820                        sess->fw_login_state = DSC_LS_PRLI_PEND;
4821
4822                        if (wd3_lo & BIT_7)
4823                                sess->conf_compl_supported = 1;
4824
4825                        if ((wd3_lo & BIT_4) == 0)
4826                                sess->port_type = FCT_INITIATOR;
4827                        else
4828                                sess->port_type = FCT_TARGET;
4829                }
4830                res = 1; /* send notify ack */
4831
4832                /* Make session global (not used in fabric mode) */
4833                if (ha->current_topology != ISP_CFG_F) {
4834                        if (sess) {
4835                                ql_dbg(ql_dbg_disc, vha, 0x20fa,
4836                                    "%s %d %8phC post nack\n",
4837                                    __func__, __LINE__, sess->port_name);
4838                                qla24xx_post_nack_work(vha, sess, iocb,
4839                                        SRB_NACK_PRLI);
4840                                res = 0;
4841                        } else {
4842                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4843                                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4844                                qla2xxx_wake_dpc(vha);
4845                        }
4846                } else {
4847                        if (sess) {
4848                                ql_dbg(ql_dbg_disc, vha, 0x20fb,
4849                                    "%s %d %8phC post nack\n",
4850                                    __func__, __LINE__, sess->port_name);
4851                                qla24xx_post_nack_work(vha, sess, iocb,
4852                                        SRB_NACK_PRLI);
4853                                res = 0;
4854                        }
4855                }
4856                break;
4857
4858        case ELS_TPRLO:
4859                if (le16_to_cpu(iocb->u.isp24.flags) &
4860                        NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
4861                        loop_id = 0xFFFF;
4862                        qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
4863                        res = 1;
4864                        break;
4865                }
4866                /* drop through */
4867        case ELS_LOGO:
4868        case ELS_PRLO:
4869                spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4870                sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
4871                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4872
4873                if (sess) {
4874                        sess->login_gen++;
4875                        sess->fw_login_state = DSC_LS_LOGO_PEND;
4876                        sess->logo_ack_needed = 1;
4877                        memcpy(sess->iocb, iocb, IOCB_SIZE);
4878                }
4879
4880                res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4881
4882                ql_dbg(ql_dbg_disc, vha, 0x20fc,
4883                    "%s: logo %llx res %d sess %p ",
4884                    __func__, wwn, res, sess);
4885                if (res == 0) {
4886                        /*
4887                         * cmd went upper layer, look for qlt_xmit_tm_rsp()
4888                         * for LOGO_ACK & sess delete
4889                         */
4890                        BUG_ON(!sess);
4891                        res = 0;
4892                } else {
4893                        /* cmd did not go to upper layer. */
4894                        if (sess) {
4895                                qlt_schedule_sess_for_deletion_lock(sess);
4896                                res = 0;
4897                        }
4898                        /* else logo will be ack */
4899                }
4900                break;
4901        case ELS_PDISC:
4902        case ELS_ADISC:
4903        {
4904                struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4905                if (tgt->link_reinit_iocb_pending) {
4906                        qlt_send_notify_ack(ha->base_qpair,
4907                            &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
4908                        tgt->link_reinit_iocb_pending = 0;
4909                }
4910
4911                sess = qla2x00_find_fcport_by_wwpn(vha,
4912                    iocb->u.isp24.port_name, 1);
4913                if (sess) {
4914                        ql_dbg(ql_dbg_disc, vha, 0x20fd,
4915                                "sess %p lid %d|%d DS %d LS %d\n",
4916                                sess, sess->loop_id, loop_id,
4917                                sess->disc_state, sess->fw_login_state);
4918                }
4919
4920                res = 1; /* send notify ack */
4921                break;
4922        }
4923
4924        case ELS_FLOGI: /* should never happen */
4925        default:
4926                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4927                    "qla_target(%d): Unsupported ELS command %x "
4928                    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4929                res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4930                break;
4931        }
4932
4933        return res;
4934}
4935
4936/*
4937 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4938 */
4939static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4940        struct imm_ntfy_from_isp *iocb)
4941{
4942        struct qla_hw_data *ha = vha->hw;
4943        uint32_t add_flags = 0;
4944        int send_notify_ack = 1;
4945        uint16_t status;
4946
4947        status = le16_to_cpu(iocb->u.isp2x.status);
4948        switch (status) {
4949        case IMM_NTFY_LIP_RESET:
4950        {
4951                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4952                    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4953                    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4954                    iocb->u.isp24.status_subcode);
4955
4956                if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4957                        send_notify_ack = 0;
4958                break;
4959        }
4960
4961        case IMM_NTFY_LIP_LINK_REINIT:
4962        {
4963                struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4964                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4965                    "qla_target(%d): LINK REINIT (loop %#x, "
4966                    "subcode %x)\n", vha->vp_idx,
4967                    le16_to_cpu(iocb->u.isp24.nport_handle),
4968                    iocb->u.isp24.status_subcode);
4969                if (tgt->link_reinit_iocb_pending) {
4970                        qlt_send_notify_ack(ha->base_qpair,
4971                            &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
4972                }
4973                memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
4974                tgt->link_reinit_iocb_pending = 1;
4975                /*
4976                 * QLogic requires to wait after LINK REINIT for possible
4977                 * PDISC or ADISC ELS commands
4978                 */
4979                send_notify_ack = 0;
4980                break;
4981        }
4982
4983        case IMM_NTFY_PORT_LOGOUT:
4984                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
4985                    "qla_target(%d): Port logout (loop "
4986                    "%#x, subcode %x)\n", vha->vp_idx,
4987                    le16_to_cpu(iocb->u.isp24.nport_handle),
4988                    iocb->u.isp24.status_subcode);
4989
4990                if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
4991                        send_notify_ack = 0;
4992                /* The sessions will be cleared in the callback, if needed */
4993                break;
4994
4995        case IMM_NTFY_GLBL_TPRLO:
4996                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
4997                    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
4998                if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4999                        send_notify_ack = 0;
5000                /* The sessions will be cleared in the callback, if needed */
5001                break;
5002
5003        case IMM_NTFY_PORT_CONFIG:
5004                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5005                    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5006                    status);
5007                if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5008                        send_notify_ack = 0;
5009                /* The sessions will be cleared in the callback, if needed */
5010                break;
5011
5012        case IMM_NTFY_GLBL_LOGO:
5013                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5014                    "qla_target(%d): Link failure detected\n",
5015                    vha->vp_idx);
5016                /* I_T nexus loss */
5017                if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5018                        send_notify_ack = 0;
5019                break;
5020
5021        case IMM_NTFY_IOCB_OVERFLOW:
5022                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5023                    "qla_target(%d): Cannot provide requested "
5024                    "capability (IOCB overflowed the immediate notify "
5025                    "resource count)\n", vha->vp_idx);
5026                break;
5027
5028        case IMM_NTFY_ABORT_TASK:
5029                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5030                    "qla_target(%d): Abort Task (S %08x I %#x -> "
5031                    "L %#x)\n", vha->vp_idx,
5032                    le16_to_cpu(iocb->u.isp2x.seq_id),
5033                    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5034                    le16_to_cpu(iocb->u.isp2x.lun));
5035                if (qlt_abort_task(vha, iocb) == 0)
5036                        send_notify_ack = 0;
5037                break;
5038
5039        case IMM_NTFY_RESOURCE:
5040                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5041                    "qla_target(%d): Out of resources, host %ld\n",
5042                    vha->vp_idx, vha->host_no);
5043                break;
5044
5045        case IMM_NTFY_MSG_RX:
5046                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5047                    "qla_target(%d): Immediate notify task %x\n",
5048                    vha->vp_idx, iocb->u.isp2x.task_flags);
5049                if (qlt_handle_task_mgmt(vha, iocb) == 0)
5050                        send_notify_ack = 0;
5051                break;
5052
5053        case IMM_NTFY_ELS:
5054                if (qlt_24xx_handle_els(vha, iocb) == 0)
5055                        send_notify_ack = 0;
5056                break;
5057        default:
5058                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5059                    "qla_target(%d): Received unknown immediate "
5060                    "notify status %x\n", vha->vp_idx, status);
5061                break;
5062        }
5063
5064        if (send_notify_ack)
5065                qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5066                    0, 0);
5067}
5068
5069/*
5070 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5071 * This function sends busy to ISP 2xxx or 24xx.
5072 */
5073static int __qlt_send_busy(struct qla_qpair *qpair,
5074        struct atio_from_isp *atio, uint16_t status)
5075{
5076        struct scsi_qla_host *vha = qpair->vha;
5077        struct ctio7_to_24xx *ctio24;
5078        struct qla_hw_data *ha = vha->hw;
5079        request_t *pkt;
5080        struct fc_port *sess = NULL;
5081        unsigned long flags;
5082        u16 temp;
5083
5084        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5085        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5086            atio->u.isp24.fcp_hdr.s_id);
5087        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5088        if (!sess) {
5089                qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5090                return 0;
5091        }
5092        /* Sending marker isn't necessary, since we called from ISR */
5093
5094        pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5095        if (!pkt) {
5096                ql_dbg(ql_dbg_io, vha, 0x3063,
5097                    "qla_target(%d): %s failed: unable to allocate "
5098                    "request packet", vha->vp_idx, __func__);
5099                return -ENOMEM;
5100        }
5101
5102        qpair->tgt_counters.num_q_full_sent++;
5103        pkt->entry_count = 1;
5104        pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5105
5106        ctio24 = (struct ctio7_to_24xx *)pkt;
5107        ctio24->entry_type = CTIO_TYPE7;
5108        ctio24->nport_handle = sess->loop_id;
5109        ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5110        ctio24->vp_index = vha->vp_idx;
5111        ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
5112        ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5113        ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5114        ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5115        temp = (atio->u.isp24.attr << 9) |
5116                CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5117                CTIO7_FLAGS_DONT_RET_CTIO;
5118        ctio24->u.status1.flags = cpu_to_le16(temp);
5119        /*
5120         * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5121         * if the explicit conformation is used.
5122         */
5123        ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5124        ctio24->u.status1.scsi_status = cpu_to_le16(status);
5125        /* Memory Barrier */
5126        wmb();
5127        if (qpair->reqq_start_iocbs)
5128                qpair->reqq_start_iocbs(qpair);
5129        else
5130                qla2x00_start_iocbs(vha, qpair->req);
5131        return 0;
5132}
5133
5134/*
5135 * This routine is used to allocate a command for either a QFull condition
5136 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5137 * out previously.
5138 */
5139static void
5140qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5141        struct atio_from_isp *atio, uint16_t status, int qfull)
5142{
5143        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5144        struct qla_hw_data *ha = vha->hw;
5145        struct fc_port *sess;
5146        struct se_session *se_sess;
5147        struct qla_tgt_cmd *cmd;
5148        int tag;
5149        unsigned long flags;
5150
5151        if (unlikely(tgt->tgt_stop)) {
5152                ql_dbg(ql_dbg_io, vha, 0x300a,
5153                        "New command while device %p is shutting down\n", tgt);
5154                return;
5155        }
5156
5157        if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5158                vha->hw->tgt.num_qfull_cmds_dropped++;
5159                if (vha->hw->tgt.num_qfull_cmds_dropped >
5160                        vha->qla_stats.stat_max_qfull_cmds_dropped)
5161                        vha->qla_stats.stat_max_qfull_cmds_dropped =
5162                                vha->hw->tgt.num_qfull_cmds_dropped;
5163
5164                ql_dbg(ql_dbg_io, vha, 0x3068,
5165                        "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5166                        vha->vp_idx, __func__,
5167                        vha->hw->tgt.num_qfull_cmds_dropped);
5168
5169                qlt_chk_exch_leak_thresh_hold(vha);
5170                return;
5171        }
5172
5173        sess = ha->tgt.tgt_ops->find_sess_by_s_id
5174                (vha, atio->u.isp24.fcp_hdr.s_id);
5175        if (!sess)
5176                return;
5177
5178        se_sess = sess->se_sess;
5179
5180        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5181        if (tag < 0)
5182                return;
5183
5184        cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5185        if (!cmd) {
5186                ql_dbg(ql_dbg_io, vha, 0x3009,
5187                        "qla_target(%d): %s: Allocation of cmd failed\n",
5188                        vha->vp_idx, __func__);
5189
5190                vha->hw->tgt.num_qfull_cmds_dropped++;
5191                if (vha->hw->tgt.num_qfull_cmds_dropped >
5192                        vha->qla_stats.stat_max_qfull_cmds_dropped)
5193                        vha->qla_stats.stat_max_qfull_cmds_dropped =
5194                                vha->hw->tgt.num_qfull_cmds_dropped;
5195
5196                qlt_chk_exch_leak_thresh_hold(vha);
5197                return;
5198        }
5199
5200        memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5201
5202        qlt_incr_num_pend_cmds(vha);
5203        INIT_LIST_HEAD(&cmd->cmd_list);
5204        memcpy(&cmd->atio, atio, sizeof(*atio));
5205
5206        cmd->tgt = vha->vha_tgt.qla_tgt;
5207        cmd->vha = vha;
5208        cmd->reset_count = ha->base_qpair->chip_reset;
5209        cmd->q_full = 1;
5210        cmd->qpair = ha->base_qpair;
5211
5212        if (qfull) {
5213                cmd->q_full = 1;
5214                /* NOTE: borrowing the state field to carry the status */
5215                cmd->state = status;
5216        } else
5217                cmd->term_exchg = 1;
5218
5219        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5220        list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5221
5222        vha->hw->tgt.num_qfull_cmds_alloc++;
5223        if (vha->hw->tgt.num_qfull_cmds_alloc >
5224                vha->qla_stats.stat_max_qfull_cmds_alloc)
5225                vha->qla_stats.stat_max_qfull_cmds_alloc =
5226                        vha->hw->tgt.num_qfull_cmds_alloc;
5227        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5228}
5229
/*
 * Replay commands parked on ha->tgt.q_full_list: re-send BUSY for q_full
 * commands and terminate exchanges for term_exchg commands, then free the
 * ones that were actually sent. Commands that could not be sent (IOCB
 * ring still full, rc == -ENOMEM) are spliced back onto the shared list
 * for a later retry.
 *
 * Returns the last send status (0 or -ENOMEM).
 */
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	/* Lock-free fast path; rechecked under the lock below */
	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	/* Take the whole backlog private so it can be walked off-lock */
	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		/* Ring full again: stop; the remainder is spliced back below */
		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/* This cmd was never sent to TCM.  There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}

	/* Anything not sent goes back onto the shared list for a retry */
	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
5305
5306static void
5307qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5308    uint16_t status)
5309{
5310        int rc = 0;
5311        struct scsi_qla_host *vha = qpair->vha;
5312
5313        rc = __qlt_send_busy(qpair, atio, status);
5314        if (rc == -ENOMEM)
5315                qlt_alloc_qfull_cmd(vha, atio, status, 1);
5316}
5317
5318static int
5319qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5320        struct atio_from_isp *atio, uint8_t ha_locked)
5321{
5322        struct qla_hw_data *ha = vha->hw;
5323        uint16_t status;
5324        unsigned long flags;
5325
5326        if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5327                return 0;
5328
5329        if (!ha_locked)
5330                spin_lock_irqsave(&ha->hardware_lock, flags);
5331        status = temp_sam_status;
5332        qlt_send_busy(qpair, atio, status);
5333        if (!ha_locked)
5334                spin_unlock_irqrestore(&ha->hardware_lock, flags);
5335
5336        return 1;
5337}
5338
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Entry point for ATIO-ring packets: dispatches new SCSI commands and
 * task management functions (ATIO_TYPE7) and immediate notifies to their
 * handlers, replying BUSY when the command cannot be accepted.
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio,
			    SAM_STAT_TASK_SET_FULL);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				/* BUSY already sent; drop this command */
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				if (!ha_locked)
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);

#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(ha->base_qpair, atio,
				    SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
#endif
				if (!ha_locked)
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					if (!ha_locked)
						spin_lock_irqsave(
						    &ha->hardware_lock, flags);
					qlt_send_busy(ha->base_qpair,
					    atio, SAM_STAT_BUSY);
					if (!ha_locked)
						spin_unlock_irqrestore(
						    &ha->hardware_lock, flags);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		/* Immediate notify handling requires the hardware lock */
		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
5459
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * qlt_response_pkt() - dispatch one response-queue entry to target mode.
 * @vha: virtual host the packet arrived on
 * @rsp: response queue the packet was pulled from
 * @pkt: raw response entry from the firmware
 *
 * Routes CTIO completions, legacy ACCEPT_TGT_IO ATIOs, immediate
 * notifies, notify-acks and ABTS request/response entries to their
 * respective handlers.  Packets arriving while no target instance is
 * attached are logged and dropped.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		/* Fold firmware entry_status into the upper 16 bits of status */
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		/* Legacy (2xxx-series) inbound command path */
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				/* -ESRCH: no session known for the initiator */
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(rsp->qpair, atio, 0);
#else
				qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(rsp->qpair, NULL,
					    atio, 1, 0);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(rsp->qpair, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			/* Firmware sent an ack we never asked for */
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

}
5638
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * qlt_async_event() - target-mode hook for firmware async events.
 * @code: async event code (MBA_*)
 * @vha: host the event was reported on
 * @mailbox: snapshot of the mailbox registers for this event
 *
 * Ignored entirely when no target instance is attached or the target is
 * stopping/stopped.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	/* These events are not meaningful on ISP2100 hardware */
	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	/*
	 * NOTE(review): mailbox[] is a plain uint16_t array; the
	 * le16_to_cpu() calls below assume little-endian register contents —
	 * confirm intended byte order against the caller that fills mailbox.
	 */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		/* Ack a LINK REINIT notify that was deferred while link was down */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		if (le16_to_cpu(mailbox[3]) == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			/* After several consecutive starvations, reset the RISC */
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation-. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			/* A successful login clears the starvation counter */
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;
	default:
		break;
	}

}
5749
/*
 * qlt_get_port_database() - fetch/refresh an fc_port from the firmware
 * port database.
 * @vha: host to query
 * @loop_id: firmware loop id of the remote port
 *
 * Allocates a temporary fc_port and fills it via the (sleeping) GPDB
 * mailbox command.  If a port with the same WWPN already exists, the
 * existing entry is updated and the temporary one freed; otherwise the
 * new port is linked into vp_fcports.  For new ports, when the host runs
 * in initiator or dual mode, deferred discovery work (upd_fcport or
 * gpsc) is scheduled.
 *
 * Return: the (possibly pre-existing) fc_port, or NULL on failure.
 */
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;	/* set when a brand-new port was linked in */

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	/* Synchronous Get Port Database mailbox command */
	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		/* Known WWPN: refresh the existing port, free the temp later */
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
		   vha->fcport_count++;
		fcport->login_gen++;
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			/* gpsc only when iIDMA-capable and GPSC is supported */
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				ql_dbg(ql_dbg_disc, vha, 0x20fe,
				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_upd_fcport_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				   "%s %d %8phC post gpsc fcp_cnt %d\n",
				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}
	/* Duplicate WWPN case: drop the temporary allocation */
	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}
5833
/*
 * qlt_make_local_sess() - create a session for an initiator learned from
 * an unsolicited frame (ABTS/TMR) rather than from a normal login.
 * @vha: host the frame arrived on
 * @s_id: initiator port id, wire (big-endian) byte order
 *
 * NOTE(review): despite the historical "Must be called under tgt_mutex"
 * annotation, this function takes vha_tgt.tgt_mutex itself below, so
 * callers must NOT already hold it — confirm against call sites.
 *
 * The loop-id lookup is retried if a global target reset happened while
 * the port database was being read.
 *
 * Return: new session (holding an extra creation reference) or NULL.
 */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	/* FFFCxx is a well-known Domain Controller address */
	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id[0], s_id[1], s_id[2]);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	/* Snapshot the reset counter so a concurrent reset can be detected */
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id[0], s_id[1],
		    s_id[2]);

		/* Unknown port: explicitly log it out */
		if (rc == -ENOENT) {
			qlt_port_logo_t logo;
			sid_to_portid(s_id, &logo.id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
5903
5904static void qlt_abort_work(struct qla_tgt *tgt,
5905        struct qla_tgt_sess_work_param *prm)
5906{
5907        struct scsi_qla_host *vha = tgt->vha;
5908        struct qla_hw_data *ha = vha->hw;
5909        struct fc_port *sess = NULL;
5910        unsigned long flags = 0, flags2 = 0;
5911        uint32_t be_s_id;
5912        uint8_t s_id[3];
5913        int rc;
5914
5915        spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5916
5917        if (tgt->tgt_stop)
5918                goto out_term2;
5919
5920        s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5921        s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5922        s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5923
5924        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5925            (unsigned char *)&be_s_id);
5926        if (!sess) {
5927                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5928
5929                sess = qlt_make_local_sess(vha, s_id);
5930                /* sess has got an extra creation ref */
5931
5932                spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5933                if (!sess)
5934                        goto out_term2;
5935        } else {
5936                if (sess->deleted) {
5937                        sess = NULL;
5938                        goto out_term2;
5939                }
5940
5941                if (!kref_get_unless_zero(&sess->sess_kref)) {
5942                        ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
5943                            "%s: kref_get fail %8phC \n",
5944                             __func__, sess->port_name);
5945                        sess = NULL;
5946                        goto out_term2;
5947                }
5948        }
5949
5950        rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5951        ha->tgt.tgt_ops->put_sess(sess);
5952        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5953
5954        if (rc != 0)
5955                goto out_term;
5956        return;
5957
5958out_term2:
5959        if (sess)
5960                ha->tgt.tgt_ops->put_sess(sess);
5961        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5962
5963out_term:
5964        spin_lock_irqsave(&ha->hardware_lock, flags);
5965        qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
5966            FCP_TMF_REJECTED, false);
5967        spin_unlock_irqrestore(&ha->hardware_lock, flags);
5968}
5969
/*
 * qlt_tmr_work() - process a deferred task-management request whose
 * session was unknown when the TM IOCB arrived.
 * @tgt: target instance
 * @prm: queued work item carrying the original TM ATIO
 *
 * Looks the session up by S_ID — creating a local session if necessary —
 * and issues the TMF.  On any failure the exchange is terminated.
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	u64 unpacked_lun;
	int fn;
	void *iocb;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		/* Drop the lock: qlt_make_local_sess() sleeps */
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			     __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	/* Extract TMF code and LUN from the original ATIO */
	iocb = a;
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
6035
6036static void qlt_sess_work_fn(struct work_struct *work)
6037{
6038        struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6039        struct scsi_qla_host *vha = tgt->vha;
6040        unsigned long flags;
6041
6042        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6043
6044        spin_lock_irqsave(&tgt->sess_work_lock, flags);
6045        while (!list_empty(&tgt->sess_works_list)) {
6046                struct qla_tgt_sess_work_param *prm = list_entry(
6047                    tgt->sess_works_list.next, typeof(*prm),
6048                    sess_works_list_entry);
6049
6050                /*
6051                 * This work can be scheduled on several CPUs at time, so we
6052                 * must delete the entry to eliminate double processing
6053                 */
6054                list_del(&prm->sess_works_list_entry);
6055
6056                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6057
6058                switch (prm->type) {
6059                case QLA_TGT_SESS_WORK_ABORT:
6060                        qlt_abort_work(tgt, prm);
6061                        break;
6062                case QLA_TGT_SESS_WORK_TM:
6063                        qlt_tmr_work(tgt, prm);
6064                        break;
6065                default:
6066                        BUG_ON(1);
6067                        break;
6068                }
6069
6070                spin_lock_irqsave(&tgt->sess_work_lock, flags);
6071
6072                kfree(prm);
6073        }
6074        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6075}
6076
6077/* Must be called under tgt_host_action_mutex */
6078int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6079{
6080        struct qla_tgt *tgt;
6081        int rc, i;
6082        struct qla_qpair_hint *h;
6083
6084        if (!QLA_TGT_MODE_ENABLED())
6085                return 0;
6086
6087        if (!IS_TGT_MODE_CAPABLE(ha)) {
6088                ql_log(ql_log_warn, base_vha, 0xe070,
6089                    "This adapter does not support target mode.\n");
6090                return 0;
6091        }
6092
6093        ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6094            "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6095
6096        BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6097
6098        tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6099        if (!tgt) {
6100                ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6101                    "Unable to allocate struct qla_tgt\n");
6102                return -ENOMEM;
6103        }
6104
6105        tgt->qphints = kzalloc((ha->max_qpairs + 1) *
6106            sizeof(struct qla_qpair_hint), GFP_KERNEL);
6107        if (!tgt->qphints) {
6108                kfree(tgt);
6109                ql_log(ql_log_warn, base_vha, 0x0197,
6110                    "Unable to allocate qpair hints.\n");
6111                return -ENOMEM;
6112        }
6113
6114        if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6115                base_vha->host->hostt->supported_mode |= MODE_TARGET;
6116
6117        rc = btree_init64(&tgt->lun_qpair_map);
6118        if (rc) {
6119                kfree(tgt->qphints);
6120                kfree(tgt);
6121                ql_log(ql_log_info, base_vha, 0x0198,
6122                        "Unable to initialize lun_qpair_map btree\n");
6123                return -EIO;
6124        }
6125        h = &tgt->qphints[0];
6126        h->qpair = ha->base_qpair;
6127        INIT_LIST_HEAD(&h->hint_elem);
6128        h->cpuid = ha->base_qpair->cpuid;
6129        list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6130
6131        for (i = 0; i < ha->max_qpairs; i++) {
6132                unsigned long flags;
6133
6134                struct qla_qpair *qpair = ha->queue_pair_map[i];
6135                h = &tgt->qphints[i + 1];
6136                INIT_LIST_HEAD(&h->hint_elem);
6137                if (qpair) {
6138                        h->qpair = qpair;
6139                        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6140                        list_add_tail(&h->hint_elem, &qpair->hints_list);
6141                        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6142                        h->cpuid = qpair->cpuid;
6143                }
6144        }
6145
6146        tgt->ha = ha;
6147        tgt->vha = base_vha;
6148        init_waitqueue_head(&tgt->waitQ);
6149        INIT_LIST_HEAD(&tgt->del_sess_list);
6150        spin_lock_init(&tgt->sess_work_lock);
6151        INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6152        INIT_LIST_HEAD(&tgt->sess_works_list);
6153        atomic_set(&tgt->tgt_global_resets_count, 0);
6154
6155        base_vha->vha_tgt.qla_tgt = tgt;
6156
6157        ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6158                "qla_target(%d): using 64 Bit PCI addressing",
6159                base_vha->vp_idx);
6160        /* 3 is reserved */
6161        tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6162
6163        mutex_lock(&qla_tgt_mutex);
6164        list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6165        mutex_unlock(&qla_tgt_mutex);
6166
6167        if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6168                ha->tgt.tgt_ops->add_target(base_vha);
6169
6170        return 0;
6171}
6172
6173/* Must be called under tgt_host_action_mutex */
6174int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6175{
6176        if (!vha->vha_tgt.qla_tgt)
6177                return 0;
6178
6179        if (vha->fc_vport) {
6180                qlt_release(vha->vha_tgt.qla_tgt);
6181                return 0;
6182        }
6183
6184        /* free left over qfull cmds */
6185        qlt_init_term_exchange(vha);
6186
6187        ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6188            vha->host_no, ha);
6189        qlt_release(vha->vha_tgt.qla_tgt);
6190
6191        return 0;
6192}
6193
6194void qlt_remove_target_resources(struct qla_hw_data *ha)
6195{
6196        struct scsi_qla_host *node;
6197        u32 key = 0;
6198
6199        btree_for_each_safe32(&ha->tgt.host_map, key, node)
6200                btree_remove32(&ha->tgt.host_map, key);
6201
6202        btree_destroy32(&ha->tgt.host_map);
6203}
6204
6205static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6206        unsigned char *b)
6207{
6208        int i;
6209
6210        pr_debug("qla2xxx HW vha->node_name: ");
6211        for (i = 0; i < WWN_SIZE; i++)
6212                pr_debug("%02x ", vha->node_name[i]);
6213        pr_debug("\n");
6214        pr_debug("qla2xxx HW vha->port_name: ");
6215        for (i = 0; i < WWN_SIZE; i++)
6216                pr_debug("%02x ", vha->port_name[i]);
6217        pr_debug("\n");
6218
6219        pr_debug("qla2xxx passed configfs WWPN: ");
6220        put_unaligned_be64(wwpn, b);
6221        for (i = 0; i < WWN_SIZE; i++)
6222                pr_debug("%02x ", b[i]);
6223        pr_debug("\n");
6224}
6225
6226/**
6227 * qla_tgt_lport_register - register lport with external module
6228 *
6229 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
6230 * @wwpn: Passwd FC target WWPN
6231 * @callback:  lport initialization callback for tcm_qla2xxx code
6232 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6233 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	/*
	 * Walk the global list of registered targets looking for the host
	 * whose port name matches @phys_wwpn, then hand it to @callback.
	 */
	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		/* Skip hosts whose template cannot do target mode at all. */
		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		/* For a physical (non-NPIV) registration, target mode must
		 * not already be active on this host. */
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		/* Pin the Scsi_Host for the lifetime of the lport; the
		 * reference is dropped in qlt_lport_deregister() (or just
		 * below on mismatch / callback failure). */
		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		/* qlt_lport_dump() wrote @phys_wwpn into b big-endian;
		 * compare it against this host's port name. */
		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
6297
6298/**
6299 * qla_tgt_lport_deregister - Degister lport
6300 *
6301 * @vha:  Registered scsi_qla_host pointer
6302 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 * (taken by scsi_host_get() in qlt_lport_register()).
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
6318
6319/* Must be called under HW lock */
6320static void qlt_set_mode(struct scsi_qla_host *vha)
6321{
6322        switch (ql2x_ini_mode) {
6323        case QLA2XXX_INI_MODE_DISABLED:
6324        case QLA2XXX_INI_MODE_EXCLUSIVE:
6325                vha->host->active_mode = MODE_TARGET;
6326                break;
6327        case QLA2XXX_INI_MODE_ENABLED:
6328                vha->host->active_mode = MODE_UNKNOWN;
6329                break;
6330        case QLA2XXX_INI_MODE_DUAL:
6331                vha->host->active_mode = MODE_DUAL;
6332                break;
6333        default:
6334                break;
6335        }
6336}
6337
6338/* Must be called under HW lock */
6339static void qlt_clear_mode(struct scsi_qla_host *vha)
6340{
6341        switch (ql2x_ini_mode) {
6342        case QLA2XXX_INI_MODE_DISABLED:
6343                vha->host->active_mode = MODE_UNKNOWN;
6344                break;
6345        case QLA2XXX_INI_MODE_EXCLUSIVE:
6346                vha->host->active_mode = MODE_INITIATOR;
6347                break;
6348        case QLA2XXX_INI_MODE_ENABLED:
6349        case QLA2XXX_INI_MODE_DUAL:
6350                vha->host->active_mode = MODE_INITIATOR;
6351                break;
6352        default:
6353                break;
6354        }
6355}
6356
6357/*
6358 * qla_tgt_enable_vha - NO LOCK HELD
6359 *
6360 * host_reset, bring up w/ Target Mode Enabled
6361 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Flip the host into target mode under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		/* NPIV port: bounce just this virtual port. */
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		/* Physical port: full ISP reset via the DPC thread, then
		 * wait for the HBA to come back online in target mode. */
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);
6393
6394/*
6395 * qla_tgt_disable_vha - NO LOCK HELD
6396 *
6397 * Disable Target Mode and reset the adapter
6398 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	/* Drop target mode under the hardware lock ... */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* ... then reset the adapter so the change takes effect, waiting
	 * for it to come back online. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
6421
6422/*
6423 * Called from qla_init.c:qla24xx_vport_create() contex to setup
6424 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6425 * members.
6426 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	/* No qla_tgt exists yet; qlt_add_target() below creates it. */
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	/* New vports start with target mode off. */
	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
6447
6448void
6449qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
6450{
6451        /*
6452         * FC-4 Feature bit 0 indicates target functionality to the name server.
6453         */
6454        if (qla_tgt_mode_enabled(vha)) {
6455                ct_req->req.rff_id.fc4_feature = BIT_0;
6456        } else if (qla_ini_mode_enabled(vha)) {
6457                ct_req->req.rff_id.fc4_feature = BIT_1;
6458        } else if (qla_dual_mode_enabled(vha))
6459                ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6460}
6461
6462/*
6463 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6464 * @ha: HA context
6465 *
6466 * Beginning of ATIO ring has initialization control block already built
6467 * by nvram config routine.
6468 *
6469 * Returns 0 on success.
6470 */
6471void
6472qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6473{
6474        struct qla_hw_data *ha = vha->hw;
6475        uint16_t cnt;
6476        struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6477
6478        if (qla_ini_mode_enabled(vha))
6479                return;
6480
6481        for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6482                pkt->u.raw.signature = ATIO_PROCESSED;
6483                pkt++;
6484        }
6485
6486}
6487
6488/*
6489 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6490 * @ha: SCSI driver HA context
6491 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	/* Nothing to consume until the firmware has been started. */
	if (!ha->flags.fw_started)
		return;

	/*
	 * Consume entries until the current slot still carries the
	 * ATIO_PROCESSED signature (i.e. not yet overwritten by firmware).
	 * Corrupted frames are drained too so the ring keeps moving.
	 */
	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * can not be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		/* Advance past every entry of this (possibly multi-entry)
		 * packet, re-arming each slot with ATIO_PROCESSED. */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		/* Make the signature writes visible before the register
		 * update below tells the hardware we consumed them. */
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
6544
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Reset ATIO queue in/out pointers; read back to flush the
	 * posted writes. */
	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		/* Route ATIO interrupts to the dedicated MSI-X vector
		 * (entry 2, reserved in qlt_83xx_iospace_config) via the
		 * init control block. */
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}
6566
/*
 * Apply (or undo) the target-mode NVRAM option overrides for 24xx ISPs.
 * The original option words are saved once so they can be restored when
 * target mode is later turned off.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		/* Target mode off: restore the option words saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2 service support when enabled for this qpair. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
6648
/* Second-stage init-control-block tweaks for 24xx target mode. */
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Use the user-configured target node name, if one was set. */
	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}
6671
/*
 * Apply (or undo) the target-mode NVRAM option overrides for 81xx-family
 * ISPs.  Mirrors qlt_24xx_config_nvram_stage1() for the 81xx NVRAM layout;
 * the original option words are saved once and restored when target mode
 * is later turned off.
 */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		/* Target mode off: restore the option words saved above. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2 service support when enabled for this qpair. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
6752
/* Second-stage init-control-block tweaks for 81xx target mode. */
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Use the user-configured target node name, if one was set. */
	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}

}
6776
6777void
6778qlt_83xx_iospace_config(struct qla_hw_data *ha)
6779{
6780        if (!QLA_TGT_MODE_ENABLED())
6781                return;
6782
6783        ha->msix_count += 1; /* For ATIO Q */
6784}
6785
6786
6787void
6788qlt_modify_vp_config(struct scsi_qla_host *vha,
6789        struct vp_config_entry_24xx *vpmod)
6790{
6791        /* enable target mode.  Bit5 = 1 => disable */
6792        if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
6793                vpmod->options_idx1 &= ~BIT_5;
6794
6795        /* Disable ini mode, if requested.  bit4 = 1 => disable */
6796        if (qla_tgt_mode_enabled(vha))
6797                vpmod->options_idx1 &= ~BIT_4;
6798}
6799
/* One-time per-HBA target-mode setup done at PCI probe time. */
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Select the ATIO queue register locations for this ISP family. */
	if  (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	/* Come up with target mode off until explicitly enabled. */
	qlt_clear_mode(base_vha);

	/* s_id -> vha lookup tree; failure is logged but not fatal. */
	rc = btree_init32(&ha->tgt.host_map);
	if (rc)
		ql_log(ql_log_info, base_vha, 0xd03d,
		    "Unable to initialize ha->host_map btree\n");

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}
6832
6833irqreturn_t
6834qla83xx_msix_atio_q(int irq, void *dev_id)
6835{
6836        struct rsp_que *rsp;
6837        scsi_qla_host_t *vha;
6838        struct qla_hw_data *ha;
6839        unsigned long flags;
6840
6841        rsp = (struct rsp_que *) dev_id;
6842        ha = rsp->hw;
6843        vha = pci_get_drvdata(ha->pdev);
6844
6845        spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6846
6847        qlt_24xx_process_atio_queue(vha, 0);
6848
6849        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6850
6851        return IRQ_HANDLED;
6852}
6853
6854static void
6855qlt_handle_abts_recv_work(struct work_struct *work)
6856{
6857        struct qla_tgt_sess_op *op = container_of(work,
6858                struct qla_tgt_sess_op, work);
6859        scsi_qla_host_t *vha = op->vha;
6860        struct qla_hw_data *ha = vha->hw;
6861        unsigned long flags;
6862
6863        if (qla2x00_reset_active(vha) ||
6864            (op->chip_reset != ha->base_qpair->chip_reset))
6865                return;
6866
6867        spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6868        qlt_24xx_process_atio_queue(vha, 0);
6869        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6870
6871        spin_lock_irqsave(&ha->hardware_lock, flags);
6872        qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
6873        spin_unlock_irqrestore(&ha->hardware_lock, flags);
6874
6875        kfree(op);
6876}
6877
6878void
6879qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
6880    response_t *pkt)
6881{
6882        struct qla_tgt_sess_op *op;
6883
6884        op = kzalloc(sizeof(*op), GFP_ATOMIC);
6885
6886        if (!op) {
6887                /* do not reach for ATIO queue here.  This is best effort err
6888                 * recovery at this point.
6889                 */
6890                qlt_response_pkt_all_vps(vha, rsp, pkt);
6891                return;
6892        }
6893
6894        memcpy(&op->atio, pkt, sizeof(*pkt));
6895        op->vha = vha;
6896        op->chip_reset = vha->hw->base_qpair->chip_reset;
6897        op->rsp = rsp;
6898        INIT_WORK(&op->work, qlt_handle_abts_recv_work);
6899        queue_work(qla_tgt_wq, &op->work);
6900        return;
6901}
6902
6903int
6904qlt_mem_alloc(struct qla_hw_data *ha)
6905{
6906        if (!QLA_TGT_MODE_ENABLED())
6907                return 0;
6908
6909        ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
6910            MAX_MULTI_ID_FABRIC, GFP_KERNEL);
6911        if (!ha->tgt.tgt_vp_map)
6912                return -ENOMEM;
6913
6914        ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
6915            (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
6916            &ha->tgt.atio_dma, GFP_KERNEL);
6917        if (!ha->tgt.atio_ring) {
6918                kfree(ha->tgt.tgt_vp_map);
6919                return -ENOMEM;
6920        }
6921        return 0;
6922}
6923
6924void
6925qlt_mem_free(struct qla_hw_data *ha)
6926{
6927        if (!QLA_TGT_MODE_ENABLED())
6928                return;
6929
6930        if (ha->tgt.atio_ring) {
6931                dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
6932                    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
6933                    ha->tgt.atio_dma);
6934        }
6935        kfree(ha->tgt.tgt_vp_map);
6936}
6937
6938/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* host_map is keyed by the 24-bit FC port id (s_id). */
	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		/* Publish this vha in the vp_idx -> vha lookup table. */
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		/* Insert (or replace) the s_id -> vha mapping. */
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->tgt.host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->tgt.host_map, key, vha);
		break;
	case RESET_VP_IDX:
		/* Unpublish this vha from the vp_idx lookup table. */
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		/* Drop the s_id mapping and forget our port id. */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		   "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
		if (slot)
			btree_remove32(&vha->hw->tgt.host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}
6985
/* Keep the s_id -> vha host_map in sync with the port id assigned to
 * @vha; vport_slock serializes the qlt_update_vp_map() calls. */
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->d_id.b24) {
		/* First address assignment for this vha. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else if (vha->d_id.b24 != id.b24) {
		/* Address changed: drop the old mapping, add the new one. */
		spin_lock_irqsave(&ha->vport_slock, flags);
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	}
}
7004
7005static int __init qlt_parse_ini_mode(void)
7006{
7007        if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7008                ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7009        else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7010                ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7011        else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7012                ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7013        else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7014                ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7015        else
7016                return false;
7017
7018        return true;
7019}
7020
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	/* Slab cache for task-management commands. */
	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	/* Slab cache for deferred PLOGI ACK bookkeeping. */
	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	/* Mempool with 25 pre-allocated elements guarantees forward
	 * progress for mgmt commands under memory pressure. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
7083
7084void qlt_exit(void)
7085{
7086        if (!QLA_TGT_MODE_ENABLED())
7087                return;
7088
7089        destroy_workqueue(qla_tgt_wq);
7090        mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7091        kmem_cache_destroy(qla_tgt_plogi_cachep);
7092        kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7093}
7094