linux/drivers/scsi/cxlflash/main.c
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:        AFU command that experienced the error.
 * @scp:        SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct afu *afu;
        struct cxlflash_cfg *cfg;
        struct device *dev;
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;
        u32 resid;

        if (unlikely(!cmd))
                return;

        afu = cmd->parent;
        cfg = afu->parent;
        dev = &cfg->dev->dev;
        ioarcb = &(cmd->rcb);
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
                        __func__, cmd, scp, resid);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
                        __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
                        memcpy(scp->sense_buffer, ioasa->sense_data,
                               SISL_SENSE_DATA_LEN);
                        scp->result = ioasa->rc.scsi_rc;
                } else
                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
        }

        /*
         * We encountered an error. Set scp->result based on nature
         * of error.
         */
        if (ioasa->rc.fc_rc) {
                /* We have an FC status */
                switch (ioasa->rc.fc_rc) {
                case SISL_FC_RC_LINKDOWN:
                        scp->result = (DID_REQUEUE << 16);
                        break;
                case SISL_FC_RC_RESID:
                        /* This indicates an FCP resid underrun */
                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
                                /*
                                 * If the SISL_RC_FLAGS_OVERRUN flag was set,
                                 * this error is handled elsewhere. If not,
                                 * it must be handled here; this is likely an
                                 * AFU bug.
                                 */
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_FC_RC_RESIDERR:
                        /* Resid mismatch between adapter and device */
                case SISL_FC_RC_TGTABORT:
                case SISL_FC_RC_ABORTOK:
                case SISL_FC_RC_ABORTFAIL:
                case SISL_FC_RC_NOLOGI:
                case SISL_FC_RC_ABORTPEND:
                case SISL_FC_RC_WRABORTPEND:
                case SISL_FC_RC_NOEXP:
                case SISL_FC_RC_INUSE:
                        scp->result = (DID_ERROR << 16);
                        break;
                }
        }

        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_NO_CONNECT << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd:        AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        list_del(&cmd->list);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        if (cmd->scp) {
                scp = cmd->scp;
                if (unlikely(cmd->sa.ioasc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
                                    __func__, scp, scp->result, cmd->sa.ioasc);
                scp->scsi_done(scp);
        } else if (cmd->cmd_tmf) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                wake_up_all_locked(&cfg->tmf_waitq);
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
        } else
                complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:        Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct afu_cmd *cmd, *tmp;
        struct scsi_cmnd *scp;
        ulong lock_flags;

        list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
                /* Bypass command when on a doneq, cmd_complete() will handle */
                if (!list_empty(&cmd->queue))
                        continue;

                list_del(&cmd->list);

                if (cmd->scp) {
                        scp = cmd->scp;
                        scp->result = (DID_IMM_RETRY << 16);
                        scp->scsi_done(scp);
                } else {
                        cmd->cmd_aborted = true;

                        if (cmd->cmd_tmf) {
                                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                                cfg->tmf_active = false;
                                wake_up_all_locked(&cfg->tmf_waitq);
                                spin_unlock_irqrestore(&cfg->tmf_slock,
                                                       lock_flags);
                        } else
                                complete(&cmd->cevent);
                }
        }
}

/**
 * context_reset() - reset context via specified register
 * @hwq:        Hardware queue owning the context to be reset.
 * @reset_reg:  MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = -ETIMEDOUT;
        int nretry = 0;
        u64 val = 0x1;
        ulong lock_flags;

        dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        writeq_be(val, reset_reg);
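        /*
         * Poll for the AFU to acknowledge the reset, doubling the delay on
         * each pass; the total wait is therefore bounded by roughly
         * 2^(MC_ROOM_RETRY_CNT + 1) microseconds.
         */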
        do {
                val = readq_be(reset_reg);
                if ((val & 0x1) == 0x0) {
                        rc = 0;
                        break;
                }

                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);

        if (!rc)
                flush_pending_cmds(hwq);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
                __func__, rc, val, nretry);
        return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        s64 room;
        ulong lock_flags;

        /*
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
                                            "0x%02X, room=0x%016llX\n",
                                            __func__, cmd->rcb.cdb[0], room);
                        hwq->room = 0;
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
                hwq->room = room - 1;
        }

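        /*
         * Queue the command on the pending list before ringing IOARRIN so
         * that the completion and reset paths (cmd_complete() and
         * flush_pending_cmds()) can always find it.
         */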
        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
                cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        int newval;
        ulong lock_flags;

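        /*
         * Each SQ credit corresponds to a free slot in the SQ ring; credits
         * are returned by process_hrrq() as responses are harvested.
         */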
        newval = atomic_dec_if_positive(&hwq->hsq_credits);
        if (newval <= 0) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        cmd->rcb.ioasa = &cmd->sa;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

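        /*
         * Copy the IOARCB into the current SQ slot and advance the ring
         * pointer, wrapping back to the start when the end is reached.
         */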
        *hwq->hsq_curr = cmd->rcb;
        if (hwq->hsq_curr < hwq->hsq_end)
                hwq->hsq_curr++;
        else
                hwq->hsq_curr = hwq->hsq_start;

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
               "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
               cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
               readq_be(&hwq->host_map->sq_head),
               readq_be(&hwq->host_map->sq_tail));
        return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
        if (!timeout)
                rc = -ETIMEDOUT;

        if (cmd->cmd_aborted)
                rc = -EAGAIN;

        if (unlikely(cmd->sa.ioasc != 0)) {
                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -EIO;
        }

        return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 * @afu:        AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode: round robin over
 * the queues, derived from the block layer MQ tag, or based upon the
 * submitting CPU.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
                             struct afu *afu)
{
        u32 tag;
        u32 hwq = 0;

        if (afu->num_hwqs == 1)
                return 0;

        switch (afu->hwq_mode) {
        case HWQ_MODE_RR:
                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
                break;
        case HWQ_MODE_TAG:
                tag = blk_mq_unique_tag(scp->request);
                hwq = blk_mq_unique_tag_to_hwq(tag);
                break;
        case HWQ_MODE_CPU:
                hwq = smp_processor_id() % afu->num_hwqs;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:        Internal structure associated with the host.
 * @sdev:       SCSI device destined for TMF.
 * @tmfcmd:     TMF command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
                    u64 tmfcmd)
{
        struct afu *afu = cfg->afu;
        struct afu_cmd *cmd = NULL;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        char *buf = NULL;
        ulong lock_flags;
        int rc = 0;
        ulong to;

        buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
        if (unlikely(!buf)) {
                dev_err(dev, "%s: no memory for command\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

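        /*
         * Carve the command out of the oversized allocation above so that
         * it sits at the natural alignment of struct afu_cmd.
         */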
        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
        INIT_LIST_HEAD(&cmd->queue);

        /* When Task Management Function is active do not send another */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        cfg->tmf_active = true;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        cmd->parent = afu;
        cmd->cmd_tmf = true;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
        cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN |
                              SISL_REQ_FLAGS_TMF_CMD);
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        rc = afu->send_cmd(afu, cmd);
        if (unlikely(rc)) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        to = msecs_to_jiffies(5000);
        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
                                                       !cfg->tmf_active,
                                                       cfg->tmf_slock,
                                                       to);
        if (!to) {
                dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -ETIMEDOUT;
        } else if (cmd->cmd_aborted) {
                dev_err(dev, "%s: TMF aborted\n", __func__);
                rc = -EAGAIN;
        } else if (cmd->sa.ioasc) {
                dev_err(dev, "%s: TMF failed ioasc=%08x\n",
                        __func__, cmd->sa.ioasc);
                rc = -EIO;
        }
        cfg->tmf_active = false;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
        kfree(buf);
        return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:       SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afuci(scp);
        struct scatterlist *sg = scsi_sglist(scp);
        int hwq_index = cmd_to_target_hwq(host, scp, afu);
        struct hwq *hwq = get_hwq(afu, hwq_index);
        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
        ulong lock_flags;
        int rc = 0;

        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
                            "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with regular commands.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        switch (cfg->state) {
        case STATE_PROBING:
        case STATE_PROBED:
        case STATE_RESET:
                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
                goto out;
        default:
                break;
        }

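        /*
         * A single address/length pair describes the whole transfer; this
         * assumes the SCSI host template limits commands to one
         * scatter-gather element (i.e. an sg_tablesize of 1).
         */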
        if (likely(sg)) {
                cmd->rcb.data_len = sg->length;
                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
        }

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->hwq_index = hwq_index;

        cmd->sa.ioasc = 0;
        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

        cmd->rcb.req_flags = req_flags;
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        rc = afu->send_cmd(afu, cmd);
out:
        return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->reset_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:        Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
        if (cfg->async_reset_cookie == 0)
                return;

        /* Wait until all async calls prior to this cookie have completed */
        async_synchronize_cookie(cfg->async_reset_cookie + 1);
        cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;
        struct hwq *hwq;
        int i;

        cancel_work_sync(&cfg->work_q);
        if (!current_is_async())
                cxlflash_reset_sync(cfg);

        if (likely(afu)) {
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);

                if (afu_is_irqpoll_enabled(afu)) {
                        for (i = 0; i < afu->num_hwqs; i++) {
                                hwq = get_hwq(afu, i);

                                irq_poll_disable(&hwq->irqpoll);
                        }
                }

                if (likely(afu->afu_map)) {
                        cfg->ops->psa_unmap(afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:        Internal structure associated with the host.
 * @level:      Depth of allocation, where to begin waterfall tear down.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
                      u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        switch (level) {
        case UNMAP_THREE:
                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
                if (index == PRIMARY_HWQ)
                        cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
                /* fall through */
        case UNMAP_TWO:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
                /* fall through */
        case UNMAP_ONE:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
                /* fall through */
        case FREE_IRQ:
                cfg->ops->free_afu_irqs(hwq->ctx_cookie);
                /* fall through */
        case UNDO_NOOP:
                /* No action required */
                break;
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg:        Internal structure associated with the host.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;
        ulong lock_flags;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
        if (index != PRIMARY_HWQ)
                WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
        hwq->ctx_cookie = NULL;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        flush_pending_cmds(hwq);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int k;

        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
         * area is unmapped.
         *
         * 1) Disable all AFU interrupts for each master
         * 2) Unmap the problem state area
         * 3) Stop each master context
         */
        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_intr(cfg, UNMAP_THREE, k);

        stop_afu(cfg);

        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_mc(cfg, k);

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:        Internal structure associated with the host.
 * @wait:       Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct dev_dependent_vals *ddv;
        __be64 __iomem *fc_port_regs;
        u64 reg, status;
        int i, retry_cnt = 0;

        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
                return;

        if (!afu || !afu->afu_map) {
                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }

        /* Notify AFU */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
                reg |= SISL_FC_SHUTDOWN_NORMAL;
                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
        }

        if (!wait)
                return;

        /* Wait up to 1.5 seconds for shutdown processing to complete */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);
                retry_cnt = 0;

                while (true) {
                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
                                break;
                        if (++retry_cnt >= MC_RETRY_CNT) {
                                dev_dbg(dev, "%s: port %d shutdown processing "
                                        "not yet completed\n", __func__, i);
                                break;
                        }
                        msleep(100 * retry_cnt);
                }
        }
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
        int minor;
        long bit;

        bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
        if (bit >= CXLFLASH_MAX_ADAPTERS)
                return -1;

        minor = bit & MINORMASK;
        set_bit(minor, cxlflash_minor);
        return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:      Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
        clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
        device_unregister(cfg->chardev);
        cfg->chardev = NULL;
        cdev_del(&cfg->cdev);
        cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:       PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        ulong lock_flags;

        if (!pci_is_enabled(pdev)) {
                dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with remove.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        /* Notify AFU and wait for shutdown processing to complete */
        notify_shutdown(cfg, true);

        cfg->state = STATE_FAILTERM;
        cxlflash_stop_term_user_contexts(cfg);

        switch (cfg->init_state) {
        case INIT_STATE_CDEV:
                cxlflash_release_chrdev(cfg);
                /* fall through */
        case INIT_STATE_SCSI:
                cxlflash_term_local_luns(cfg);
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
                /* fall through */
        case INIT_STATE_PCI:
                pci_disable_device(pdev);
                /* fall through */
        case INIT_STATE_NONE:
                free_mem(cfg);
                scsi_host_put(cfg->host);
                break;
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:        Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *      0 on success
 *      -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        struct device *dev = &cfg->dev->dev;

        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                dev_err(dev, "%s: cannot get %d free pages\n",
                        __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
        cfg->afu->afu_map = NULL;
out:
        return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *      TRUE (1) when the specified port is online
 *      FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
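                /*
                 * An all-Fs read suggests the MMIO read failed (e.g. the
                 * adapter is being recovered); halve the remaining retries
                 * to fail faster.
                 */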
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *      TRUE (1) when the specified port is offline
 *      FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
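                /* As above, an all-Fs read suggests a failed MMIO read */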
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @wwpn:       The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
                         u64 wwpn)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);
        }

        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);
        }
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 port_sel;

        /* First switch the AFU to the other links, if any */
        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
        port_sel &= ~(1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);

        /* Switch back to include this port */
        port_sel |= (1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:        AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
        struct cxlflash_cfg *cfg = afu->parent;
        __be64 __iomem *fc_port_regs;
        int i;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 reg;

        /*
         * Global async interrupts: the AFU clears afu_ctrl on context exit
         * if async interrupts were sent to that context. This prevents the
         * AFU from sending further async interrupts when there is nobody
         * to receive them.
         */

        /* Mask all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
        /* Set LISN# to send and point to primary master context */
        reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

        if (afu->internal_lun)
                reg |= 1;       /* Bit 63 indicates local lun */
        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
        /* Clear all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
        /*
         * Unmask bits that are of interest; note that the AFU can send an
         * interrupt after this step.
         */
        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
        /*
         * Clear again in case a bit came on after the previous clear but
         * before the unmask.
         */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

        /* Clear/Set internal lun bits */
        fc_port_regs = get_fc_port_regs(cfg, 0);
        reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
        reg &= SISL_FC_INTERNAL_MASK;
        if (afu->internal_lun)
                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
        writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

        /* Now clear FC errors */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
                writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
        }

        /*
         * Sync interrupts for master's IOARRIN write. Note that unlike
         * asyncs, there can be no pending sync interrupts at this time
         * (this is a fresh context and master has not written IOARRIN
         * yet), so there is nothing to clear.
         */

        /* Set LISN#, it is always sent to the context that wrote IOARRIN */
        for (i = 0; i < afu->num_hwqs; i++) {
                hwq = get_hwq(afu, i);

                writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
                writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
        }
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 reg;
        u64 reg_unmasked;

        reg = readq_be(&hwq->host_map->intr_status);
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

        if (reg_unmasked == 0UL) {
                dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
                        __func__, reg);
                goto cxlflash_sync_err_irq_exit;
        }

        dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
                __func__, reg);

        writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
        return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:        Hardware queue associated with the RRQ.
 * @doneq:      Queue of commands harvested from the RRQ.
 * @budget:     Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
        struct afu *afu = hwq->afu;
        struct afu_cmd *cmd;
        struct sisl_ioasa *ioasa;
        struct sisl_ioarcb *ioarcb;
        bool toggle = hwq->toggle;
        int num_hrrq = 0;
        u64 entry,
            *hrrq_start = hwq->hrrq_start,
            *hrrq_end = hwq->hrrq_end,
            *hrrq_curr = hwq->hrrq_curr;

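        /*
         * The AFU stamps each RRQ entry with the current toggle value; an
         * entry whose toggle bit does not match ours has not been produced
         * yet. The toggle flips each time the queue wraps.
         */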
        /* Process ready RRQ entries up to the specified budget (if any) */
        while (true) {
                entry = *hrrq_curr;

                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;

                entry &= ~SISL_RESP_HANDLE_T_BIT;

                if (afu_is_sq_cmd_mode(afu)) {
                        ioasa = (struct sisl_ioasa *)entry;
                        cmd = container_of(ioasa, struct afu_cmd, sa);
                } else {
                        ioarcb = (struct sisl_ioarcb *)entry;
                        cmd = container_of(ioarcb, struct afu_cmd, rcb);
                }

                list_add_tail(&cmd->queue, doneq);

                /* Advance to next entry or wrap and flip the toggle bit */
                if (hrrq_curr < hrrq_end)
                        hrrq_curr++;
                else {
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }

                atomic_inc(&hwq->hsq_credits);
                num_hrrq++;

                if (budget > 0 && num_hrrq >= budget)
                        break;
        }

        hwq->hrrq_curr = hrrq_curr;
        hwq->toggle = toggle;

        return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:      Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
        struct afu_cmd *cmd, *tmp;

        WARN_ON(list_empty(doneq));

        list_for_each_entry_safe(cmd, tmp, doneq, queue)
                cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:    IRQ poll structure associated with queue to poll.
 * @budget:     Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
        struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        num_entries = process_hrrq(hwq, &doneq, budget);
        if (num_entries < budget)
                irq_poll_complete(irqpoll);

        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        process_cmd_doneq(&doneq);
        return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct afu *afu = hwq->afu;
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        if (afu_is_irqpoll_enabled(afu)) {
                irq_poll_sched(&hwq->irqpoll);
                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
                return IRQ_HANDLED;
        }

        num_entries = process_hrrq(hwq, &doneq, -1);
        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        if (num_entries == 0)
                return IRQ_NONE;

        process_cmd_doneq(&doneq);
        return IRQ_HANDLED;
}

1482/*
1483 * Asynchronous interrupt information table
1484 *
1485 * NOTE:
1486 *      - Order matters here as this array is indexed by bit position.
1487 *
1488 *      - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1489 *        as complex and complains due to a lack of parentheses/braces.
1490 */
1491#define ASTATUS_FC(_a, _b, _c, _d)                                       \
1492        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1493
1494#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
1495        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
1496        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
1497        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
1498        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
1499        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1500        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
1501        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
1502        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1503
1504static const struct asyc_intr_info ainfo[] = {
1505        BUILD_SISL_ASTATUS_FC_PORT(1),
1506        BUILD_SISL_ASTATUS_FC_PORT(0),
1507        BUILD_SISL_ASTATUS_FC_PORT(3),
1508        BUILD_SISL_ASTATUS_FC_PORT(2)
1509};
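
    /*
     * Worked expansion, to make the ordering above concrete. The first
     * element emitted by BUILD_SISL_ASTATUS_FC_PORT(1) expands, via
     * ASTATUS_FC, to:
     *
     *	{ SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }
     *
     * i.e. { status bit, description, port, action }. The async error
     * handler below indexes ainfo[] by bit position and verifies that
     * info->status == 1ULL << bit, which is why the port blocks are listed
     * in 1, 0, 3, 2 order: it keeps each entry aligned with its status bit
     * in the SISL_ASTATUS register layout.
     */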
1510
1511/**
1512 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1513 * @irq:        Interrupt number.
1514 * @data:       Private data provided at interrupt registration, the hardware queue.
1515 *
1516 * Return: Always returns IRQ_HANDLED.
1517 */
1518static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1519{
1520        struct hwq *hwq = (struct hwq *)data;
1521        struct afu *afu = hwq->afu;
1522        struct cxlflash_cfg *cfg = afu->parent;
1523        struct device *dev = &cfg->dev->dev;
1524        const struct asyc_intr_info *info;
1525        struct sisl_global_map __iomem *global = &afu->afu_map->global;
1526        __be64 __iomem *fc_port_regs;
1527        u64 reg_unmasked;
1528        u64 reg;
1529        u64 bit;
1530        u8 port;
1531
1532        reg = readq_be(&global->regs.aintr_status);
1533        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1534
1535        if (unlikely(reg_unmasked == 0)) {
1536                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1537                        __func__, reg);
1538                goto out;
1539        }
1540
1541        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1542        writeq_be(reg_unmasked, &global->regs.aintr_clear);
1543
1544        /* Check each bit that is on */
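    	/*
    	 * The u64-to-ulong cast below assumes 64-bit longs, which holds
    	 * here since the CXL transport is ppc64-only.
    	 */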
1545        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1546                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1547                        WARN_ON_ONCE(1);
1548                        continue;
1549                }
1550
1551                info = &ainfo[bit];
1552                if (unlikely(info->status != 1ULL << bit)) {
1553                        WARN_ON_ONCE(1);
1554                        continue;
1555                }
1556
1557                port = info->port;
1558                fc_port_regs = get_fc_port_regs(cfg, port);
1559
1560                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1561                        __func__, port, info->desc,
1562                        readq_be(&fc_port_regs[FC_STATUS / 8]));
1563
1564                /*
1565                 * Do link reset first, some OTHER errors will set FC_ERROR
1566                 * again if cleared before or w/o a reset
1567                 */
1568                if (info->action & LINK_RESET) {
1569                        dev_err(dev, "%s: FC Port %d: resetting link\n",
1570                                __func__, port);
1571                        cfg->lr_state = LINK_RESET_REQUIRED;
1572                        cfg->lr_port = port;
1573                        schedule_work(&cfg->work_q);
1574                }
1575
1576                if (info->action & CLR_FC_ERROR) {
1577                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1578
1579                        /*
1580                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1581                         * should be the same and tracing one is sufficient.
1582                         */
1583
1584                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1585                                __func__, port, reg);
1586
1587                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1588                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1589                }
1590
1591                if (info->action & SCAN_HOST) {
1592                        atomic_inc(&cfg->scan_host_needed);
1593                        schedule_work(&cfg->work_q);
1594                }
1595        }
1596
1597out:
1598        return IRQ_HANDLED;
1599}
1600
1601/**
1602 * read_vpd() - obtains the WWPNs from VPD
1603 * @cfg:        Internal structure associated with the host.
1604 * @wwpn:       Array of size MAX_FC_PORTS to pass back WWPNs
1605 *
1606 * Return: 0 on success, -errno on failure
1607 */
1608static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1609{
1610        struct device *dev = &cfg->dev->dev;
1611        struct pci_dev *pdev = cfg->dev;
1612        int rc = 0;
1613        int ro_start, ro_size, i, j, k;
1614        ssize_t vpd_size;
1615        char vpd_data[CXLFLASH_VPD_LEN];
1616        char tmp_buf[WWPN_BUF_LEN] = { 0 };
1617        const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1618                                                cfg->dev_id->driver_data;
1619        const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1620        const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1621
1622        /* Get the VPD data from the device */
1623        vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1624        if (unlikely(vpd_size <= 0)) {
1625                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1626                        __func__, vpd_size);
1627                rc = -ENODEV;
1628                goto out;
1629        }
1630
1631        /* Get the read only section offset */
1632        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1633                                    PCI_VPD_LRDT_RO_DATA);
1634        if (unlikely(ro_start < 0)) {
1635                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1636                rc = -ENODEV;
1637                goto out;
1638        }
1639
1640        /* Get the read only section size, cap when extends beyond read VPD */
1641        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1642        j = ro_size;
1643        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1644        if (unlikely((i + j) > vpd_size)) {
1645                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1646                        __func__, (i + j), vpd_size);
1647                ro_size = vpd_size - i;
1648        }
1649
1650        /*
1651         * Find the offset of the WWPN tag within the read only
1652         * VPD data and validate the found field (partials are
1653         * no good to us). Convert the ASCII data to an integer
1654         * value. Note that we must copy to a temporary buffer
1655         * because the conversion service requires that the ASCII
1656         * string be terminated.
1657         *
1658         * Allow for WWPN not being found for all devices, setting
1659         * the returned WWPN to zero when not found. Notify with a
1660         * log error for cards that should have had WWPN keywords
1661         * in the VPD - cards requiring WWPN will not have their
1662         * ports programmed and operate in an undefined state.
1663         */
1664        for (k = 0; k < cfg->num_fc_ports; k++) {
1665                j = ro_size;
1666                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1667
1668                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1669                if (i < 0) {
1670                        if (wwpn_vpd_required)
1671                                dev_err(dev, "%s: Port %d WWPN not found\n",
1672                                        __func__, k);
1673                        wwpn[k] = 0ULL;
1674                        continue;
1675                }
1676
1677                j = pci_vpd_info_field_size(&vpd_data[i]);
1678                i += PCI_VPD_INFO_FLD_HDR_SIZE;
1679                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1680                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1681                                __func__, k);
1682                        rc = -ENODEV;
1683                        goto out;
1684                }
1685
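    		/* WWPN_LEN (16) doubles as the hex base for kstrtoul() below */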
1686                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1687                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1688                if (unlikely(rc)) {
1689                        dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1690                                __func__, k);
1691                        rc = -ENODEV;
1692                        goto out;
1693                }
1694
1695                dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1696        }
1697
1698out:
1699        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1700        return rc;
1701}
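
    /*
     * Shape of the data read_vpd() walks, assuming standard PCI VPD
     * encoding: within the read-only large resource, each keyword field is
     * a 3-byte header (2-byte keyword + 1-byte length,
     * PCI_VPD_INFO_FLD_HDR_SIZE) followed by its data. A hypothetical
     * port 0 WWPN field:
     *
     *	offset: +0   +1   +2    +3 ...
     *	bytes:  'V'  '5'  0x10  "5005076xxxxxxxxx" (16 ASCII hex chars)
     *
     * pci_vpd_find_info_keyword() returns the offset of 'V',
     * pci_vpd_info_field_size() reads the 0x10 (== WWPN_LEN) length byte,
     * and the 16 ASCII characters are then converted with kstrtoul().
     */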
1702
1703/**
1704 * init_pcr() - initialize the provisioning and control registers
1705 * @cfg:        Internal structure associated with the host.
1706 *
1707 * Also sets up fast access to the mapped registers and initializes AFU
1708 * command fields that never change.
1709 */
1710static void init_pcr(struct cxlflash_cfg *cfg)
1711{
1712        struct afu *afu = cfg->afu;
1713        struct sisl_ctrl_map __iomem *ctrl_map;
1714        struct hwq *hwq;
1715        void *cookie;
1716        int i;
1717
1718        for (i = 0; i < MAX_CONTEXT; i++) {
1719                ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1720                /* Disrupt any clients that could be running */
1721                /* e.g. clients that survived a master restart */
1722                writeq_be(0, &ctrl_map->rht_start);
1723                writeq_be(0, &ctrl_map->rht_cnt_id);
1724                writeq_be(0, &ctrl_map->ctx_cap);
1725        }
1726
1727        /* Copy frequently used fields into hwq */
1728        for (i = 0; i < afu->num_hwqs; i++) {
1729                hwq = get_hwq(afu, i);
1730                cookie = hwq->ctx_cookie;
1731
1732                hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1733                hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1734                hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1735
1736                /* Program the Endian Control for the master context */
1737                writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1738        }
1739}
1740
1741/**
1742 * init_global() - initialize AFU global registers
1743 * @cfg:        Internal structure associated with the host.
1744 */
1745static int init_global(struct cxlflash_cfg *cfg)
1746{
1747        struct afu *afu = cfg->afu;
1748        struct device *dev = &cfg->dev->dev;
1749        struct hwq *hwq;
1750        struct sisl_host_map __iomem *hmap;
1751        __be64 __iomem *fc_port_regs;
1752        u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1753        int i = 0, num_ports = 0;
1754        int rc = 0;
1755        u64 reg;
1756
1757        rc = read_vpd(cfg, &wwpn[0]);
1758        if (rc) {
1759                dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1760                goto out;
1761        }
1762
1763        /* Set up RRQ and SQ in HWQ for master issued cmds */
1764        for (i = 0; i < afu->num_hwqs; i++) {
1765                hwq = get_hwq(afu, i);
1766                hmap = hwq->host_map;
1767
1768                writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1769                writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1770
1771                if (afu_is_sq_cmd_mode(afu)) {
1772                        writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1773                        writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1774                }
1775        }
1776
1777        /* AFU configuration */
1778        reg = readq_be(&afu->afu_map->global.regs.afu_config);
1779        reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
1780        /*
1781         * Enable all auto retry options and control endianness. Leave
1782         * others at default: CTX_CAP write protected, mbox_r does not
1783         * clear on read, and checker on if dual AFU.
             */
1784        writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1785
1786        /* Global port select: select either port */
1787        if (afu->internal_lun) {
1788                /* Only use port 0 */
1789                writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1790                num_ports = 0;
1791        } else {
1792                writeq_be(PORT_MASK(cfg->num_fc_ports),
1793                          &afu->afu_map->global.regs.afu_port_sel);
1794                num_ports = cfg->num_fc_ports;
1795        }
1796
1797        for (i = 0; i < num_ports; i++) {
1798                fc_port_regs = get_fc_port_regs(cfg, i);
1799
1800                /* Unmask all errors (but they are still masked at AFU) */
1801                writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1802                /* Clear CRC error cnt & set a threshold */
1803                (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1804                writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1805
1806                /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1807                if (wwpn[i] != 0)
1808                        afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1809                /* Programming WWPN back to back causes additional
1810                 * offline/online transitions and a PLOGI
1811                 */
1812                msleep(100);
1813        }
1814
1815        /*
1816         * Set up the master's own CTX_CAP to allow real mode, host
1817         * translation tables, AFU cmds, and read/write GSCSI cmds.
             * First, unlock ctx_cap write by reading the mbox.
             */
1818        for (i = 0; i < afu->num_hwqs; i++) {
1819                hwq = get_hwq(afu, i);
1820
1821                (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1822                writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1823                        SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1824                        SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1825                        &hwq->ctrl_map->ctx_cap);
1826        }
1827
1828        /*
1829         * Determine write-same unmap support for host by evaluating the unmap
1830         * sector support bit of the context control register associated with
1831         * the primary hardware queue. Note that while this status is reflected
1832         * in a context register, the outcome can be assumed to be host-wide.
1833         */
1834        hwq = get_hwq(afu, PRIMARY_HWQ);
1835        reg = readq_be(&hwq->host_map->ctx_ctrl);
1836        if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1837                cfg->ws_unmap = true;
1838
1839        /* Initialize heartbeat */
1840        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1841out:
1842        return rc;
1843}
1844
1845/**
1846 * start_afu() - initializes and starts the AFU
1847 * @cfg:        Internal structure associated with the host.
1848 */
1849static int start_afu(struct cxlflash_cfg *cfg)
1850{
1851        struct afu *afu = cfg->afu;
1852        struct device *dev = &cfg->dev->dev;
1853        struct hwq *hwq;
1854        int rc = 0;
1855        int i;
1856
1857        init_pcr(cfg);
1858
1859        /* Initialize each HWQ */
1860        for (i = 0; i < afu->num_hwqs; i++) {
1861                hwq = get_hwq(afu, i);
1862
1863                /* After an AFU reset, RRQ entries are stale, clear them */
1864                memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1865
1866                /* Initialize RRQ pointers */
1867                hwq->hrrq_start = &hwq->rrq_entry[0];
1868                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1869                hwq->hrrq_curr = hwq->hrrq_start;
1870                hwq->toggle = 1;
1871
1872                /* Initialize spin locks */
1873                spin_lock_init(&hwq->hrrq_slock);
1874                spin_lock_init(&hwq->hsq_slock);
1875
1876                /* Initialize SQ */
1877                if (afu_is_sq_cmd_mode(afu)) {
1878                        memset(&hwq->sq, 0, sizeof(hwq->sq));
1879                        hwq->hsq_start = &hwq->sq[0];
1880                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1881                        hwq->hsq_curr = hwq->hsq_start;
1882
1883                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1884                }
1885
1886                /* Initialize IRQ poll */
1887                if (afu_is_irqpoll_enabled(afu))
1888                        irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1889                                      cxlflash_irqpoll);
1890
1891        }
1892
1893        rc = init_global(cfg);
1894
1895        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1896        return rc;
1897}
1898
1899/**
1900 * init_intr() - setup interrupt handlers for the master context
1901 * @cfg:        Internal structure associated with the host.
1902 * @hwq:        Hardware queue to initialize.
1903 *
1904 * Return: UNDO_NOOP on success, the level of unwinding required on failure
1905 */
1906static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1907                                 struct hwq *hwq)
1908{
1909        struct device *dev = &cfg->dev->dev;
1910        void *ctx = hwq->ctx_cookie;
1911        int rc = 0;
1912        enum undo_level level = UNDO_NOOP;
1913        bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1914        int num_irqs = is_primary_hwq ? 3 : 2;
1915
1916        rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1917        if (unlikely(rc)) {
1918                dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1919                        __func__, rc);
1920                level = UNDO_NOOP;
1921                goto out;
1922        }
1923
1924        rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1925                                   "SISL_MSI_SYNC_ERROR");
1926        if (unlikely(rc <= 0)) {
1927                dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1928                level = FREE_IRQ;
1929                goto out;
1930        }
1931
1932        rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1933                                   "SISL_MSI_RRQ_UPDATED");
1934        if (unlikely(rc <= 0)) {
1935                dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1936                level = UNMAP_ONE;
1937                goto out;
1938        }
1939
1940        /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1941        if (!is_primary_hwq)
1942                goto out;
1943
1944        rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1945                                   "SISL_MSI_ASYNC_ERROR");
1946        if (unlikely(rc <= 0)) {
1947                dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1948                level = UNMAP_TWO;
1949                goto out;
1950        }
1951out:
1952        return level;
1953}
1954
1955/**
1956 * init_mc() - create and register as the master context
1957 * @cfg:        Internal structure associated with the host.
1958 * @index:      HWQ Index of the master context.
1959 *
1960 * Return: 0 on success, -errno on failure
1961 */
1962static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1963{
1964        void *ctx;
1965        struct device *dev = &cfg->dev->dev;
1966        struct hwq *hwq = get_hwq(cfg->afu, index);
1967        int rc = 0;
1968        enum undo_level level;
1969
1970        hwq->afu = cfg->afu;
1971        hwq->index = index;
1972        INIT_LIST_HEAD(&hwq->pending_cmds);
1973
1974        if (index == PRIMARY_HWQ)
1975                ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
1976        else
1977                ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1978        if (IS_ERR_OR_NULL(ctx)) {
1979                rc = -ENOMEM;
1980                goto err1;
1981        }
1982
1983        WARN_ON(hwq->ctx_cookie);
1984        hwq->ctx_cookie = ctx;
1985
1986        /* Set it up as a master with the CXL */
1987        cfg->ops->set_master(ctx);
1988
1989        /* Reset AFU when initializing primary context */
1990        if (index == PRIMARY_HWQ) {
1991                rc = cfg->ops->afu_reset(ctx);
1992                if (unlikely(rc)) {
1993                        dev_err(dev, "%s: AFU reset failed rc=%d\n",
1994                                      __func__, rc);
1995                        goto err1;
1996                }
1997        }
1998
1999        level = init_intr(cfg, hwq);
2000        if (unlikely(level)) {
2001                dev_err(dev, "%s: interrupt init failed level=%d\n",
                            __func__, level);
                    rc = -ENODEV; /* do not report success when IRQ setup fails */
2002                goto err2;
2003        }
2004
2005        /* Finally, activate the context by starting it */
2006        rc = cfg->ops->start_context(hwq->ctx_cookie);
2007        if (unlikely(rc)) {
2008                dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2009                level = UNMAP_THREE;
2010                goto err2;
2011        }
2012
2013out:
2014        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2015        return rc;
2016err2:
2017        term_intr(cfg, level, index);
2018        if (index != PRIMARY_HWQ)
2019                cfg->ops->release_context(ctx);
2020err1:
2021        hwq->ctx_cookie = NULL;
2022        goto out;
2023}
2024
2025/**
2026 * get_num_afu_ports() - determines and configures the number of AFU ports
2027 * @cfg:        Internal structure associated with the host.
2028 *
2029 * This routine determines the number of AFU ports by converting the global
2030 * port selection mask. The converted value is only valid following an AFU
2031 * reset (explicit or power-on). This routine must be invoked shortly after
2032 * mapping as other routines are dependent on the number of ports during the
2033 * initialization sequence.
2034 *
2035 * To support legacy AFUs that might not have reflected an initial global
2036 * port mask (value read is 0), default to the number of ports originally
2037 * supported by the cxlflash driver (2) before hardware with other port
2038 * offerings was introduced.
2039 */
2040static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2041{
2042        struct afu *afu = cfg->afu;
2043        struct device *dev = &cfg->dev->dev;
2044        u64 port_mask;
2045        int num_fc_ports = LEGACY_FC_PORTS;
2046
2047        port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2048        if (port_mask != 0ULL)
2049                num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2050
2051        dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2052                __func__, port_mask, num_fc_ports);
2053
2054        cfg->num_fc_ports = num_fc_ports;
2055        cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2056}
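
    /*
     * Worked example of the conversion above: a 4-port AFU with all ports
     * selected reads back port_mask = 0xf, so ilog2(0xf) + 1 = 3 + 1 = 4
     * ports; a 2-port card with mask 0x3 yields ilog2(0x3) + 1 = 2. A mask
     * of 0 (legacy AFU) falls back to LEGACY_FC_PORTS.
     */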
2057
2058/**
2059 * init_afu() - setup as master context and start AFU
2060 * @cfg:        Internal structure associated with the host.
2061 *
2062 * This routine is a higher level of control for configuring the
2063 * AFU on probe and reset paths.
2064 *
2065 * Return: 0 on success, -errno on failure
2066 */
2067static int init_afu(struct cxlflash_cfg *cfg)
2068{
2069        u64 reg;
2070        int rc = 0;
2071        struct afu *afu = cfg->afu;
2072        struct device *dev = &cfg->dev->dev;
2073        struct hwq *hwq;
2074        int i;
2075
2076        cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2077
2078        afu->num_hwqs = afu->desired_hwqs;
2079        for (i = 0; i < afu->num_hwqs; i++) {
2080                rc = init_mc(cfg, i);
2081                if (rc) {
2082                        dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2083                                __func__, rc, i);
2084                        goto err1;
2085                }
2086        }
2087
2088        /* Map the entire MMIO space of the AFU using the first context */
2089        hwq = get_hwq(afu, PRIMARY_HWQ);
2090        afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2091        if (!afu->afu_map) {
2092                dev_err(dev, "%s: psa_map failed\n", __func__);
2093                rc = -ENOMEM;
2094                goto err1;
2095        }
2096
2097        /* No byte reverse on reading afu_version, else the string will be backwards */
2098        reg = readq(&afu->afu_map->global.regs.afu_version);
2099        memcpy(afu->version, &reg, sizeof(reg));
2100        afu->interface_version =
2101            readq_be(&afu->afu_map->global.regs.interface_version);
2102        if ((afu->interface_version + 1) == 0) {
2103                dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2104                        "interface version %016llx\n", afu->version,
2105                        afu->interface_version);
2106                rc = -EINVAL;
2107                goto err1;
2108        }
2109
2110        if (afu_is_sq_cmd_mode(afu)) {
2111                afu->send_cmd = send_cmd_sq;
2112                afu->context_reset = context_reset_sq;
2113        } else {
2114                afu->send_cmd = send_cmd_ioarrin;
2115                afu->context_reset = context_reset_ioarrin;
2116        }
2117
2118        dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2119                afu->version, afu->interface_version);
2120
2121        get_num_afu_ports(cfg);
2122
2123        rc = start_afu(cfg);
2124        if (rc) {
2125                dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2126                goto err1;
2127        }
2128
2129        afu_err_intr_init(cfg->afu);
2130        for (i = 0; i < afu->num_hwqs; i++) {
2131                hwq = get_hwq(afu, i);
2132
2133                hwq->room = readq_be(&hwq->host_map->cmd_room);
2134        }
2135
2136        /* Restore the LUN mappings */
2137        cxlflash_restore_luntable(cfg);
2138out:
2139        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2140        return rc;
2141
2142err1:
2143        for (i = afu->num_hwqs - 1; i >= 0; i--) {
2144                term_intr(cfg, UNMAP_THREE, i);
2145                term_mc(cfg, i);
2146        }
2147        goto out;
2148}
2149
2150/**
2151 * afu_reset() - resets the AFU
2152 * @cfg:        Internal structure associated with the host.
2153 *
2154 * Return: 0 on success, -errno on failure
2155 */
2156static int afu_reset(struct cxlflash_cfg *cfg)
2157{
2158        struct device *dev = &cfg->dev->dev;
2159        int rc = 0;
2160
2161        /* Stop the context before the reset. Since the context is
2162         * no longer available, restart it after the reset completes.
2163         */
2164        term_afu(cfg);
2165
2166        rc = init_afu(cfg);
2167
2168        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2169        return rc;
2170}
2171
2172/**
2173 * drain_ioctls() - wait until all currently executing ioctls have completed
2174 * @cfg:        Internal structure associated with the host.
2175 *
2176 * Obtain write access to read/write semaphore that wraps ioctl
2177 * handling to 'drain' ioctls currently executing.
2178 */
2179static void drain_ioctls(struct cxlflash_cfg *cfg)
2180{
2181        down_write(&cfg->ioctl_rwsem);
2182        up_write(&cfg->ioctl_rwsem);
2183}
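
    /*
     * The drain works because ioctl service routines hold ioctl_rwsem for
     * reading across their execution, so the writer above blocks until all
     * in-flight readers finish. Reader side, as a sketch (do_ioctl_work()
     * is a hypothetical stand-in for the real ioctl dispatch):
     *
     *	down_read(&cfg->ioctl_rwsem);
     *	rc = do_ioctl_work(cfg);
     *	up_read(&cfg->ioctl_rwsem);
     */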
2184
2185/**
2186 * cxlflash_async_reset_host() - asynchronous host reset handler
2187 * @data:       Private data provided while scheduling reset.
2188 * @cookie:     Cookie that can be used for checkpointing.
2189 */
2190static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2191{
2192        struct cxlflash_cfg *cfg = data;
2193        struct device *dev = &cfg->dev->dev;
2194        int rc = 0;
2195
2196        if (cfg->state != STATE_RESET) {
2197                dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2198                        __func__, cfg->state);
2199                goto out;
2200        }
2201
2202        drain_ioctls(cfg);
2203        cxlflash_mark_contexts_error(cfg);
2204        rc = afu_reset(cfg);
2205        if (rc)
2206                cfg->state = STATE_FAILTERM;
2207        else
2208                cfg->state = STATE_NORMAL;
2209        wake_up_all(&cfg->reset_waitq);
2210
2211out:
2212        scsi_unblock_requests(cfg->host);
2213}
2214
2215/**
2216 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2217 * @cfg:        Internal structure associated with the host.
2218 */
2219static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2220{
2221        struct device *dev = &cfg->dev->dev;
2222
2223        if (cfg->state != STATE_NORMAL) {
2224                dev_dbg(dev, "%s: Not performing reset state=%d\n",
2225                        __func__, cfg->state);
2226                return;
2227        }
2228
2229        cfg->state = STATE_RESET;
2230        scsi_block_requests(cfg->host);
2231        cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2232                                                 cfg);
2233}
2234
2235/**
2236 * send_afu_cmd() - builds and sends an internal AFU command
2237 * @afu:        AFU associated with the host.
2238 * @rcb:        Pre-populated IOARCB describing command to send.
2239 *
2240 * The AFU can only take one internal AFU command at a time. This limitation is
2241 * enforced by using a mutex to provide exclusive access to the AFU during the
2242 * operation. This design point requires calling threads to not be on interrupt
2243 * context due to the possibility of sleeping during concurrent AFU operations.
2244 *
2245 * The command status is optionally passed back to the caller when the caller
2246 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2247 *
2248 * Return:
2249 *      0 on success, -errno on failure
2250 */
2251static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2252{
2253        struct cxlflash_cfg *cfg = afu->parent;
2254        struct device *dev = &cfg->dev->dev;
2255        struct afu_cmd *cmd = NULL;
2256        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2257        char *buf = NULL;
2258        int rc = 0;
2259        int nretry = 0;
2260        static DEFINE_MUTEX(sync_active);
2261
2262        if (cfg->state != STATE_NORMAL) {
2263                dev_dbg(dev, "%s: Command not sent, state=%u\n",
2264                        __func__, cfg->state);
2265                return 0;
2266        }
2267
2268        mutex_lock(&sync_active);
2269        atomic_inc(&afu->cmds_active);
2270        buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2271        if (unlikely(!buf)) {
2272                dev_err(dev, "%s: no memory for command\n", __func__);
2273                rc = -ENOMEM;
2274                goto out;
2275        }
2276
2277        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2278
2279retry:
2280        memset(cmd, 0, sizeof(*cmd));
2281        memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2282        INIT_LIST_HEAD(&cmd->queue);
2283        init_completion(&cmd->cevent);
2284        cmd->parent = afu;
2285        cmd->hwq_index = hwq->index;
2286        cmd->rcb.ctx_id = hwq->ctx_hndl;
2287
2288        dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2289                __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2290
2291        rc = afu->send_cmd(afu, cmd);
2292        if (unlikely(rc)) {
2293                rc = -ENOBUFS;
2294                goto out;
2295        }
2296
2297        rc = wait_resp(afu, cmd);
2298        switch (rc) {
2299        case -ETIMEDOUT:
2300                rc = afu->context_reset(hwq);
2301                if (rc) {
2302                        cxlflash_schedule_async_reset(cfg);
2303                        break;
2304                }
2305                /* fall through to retry */
2306        case -EAGAIN:
2307                if (++nretry < 2)
2308                        goto retry;
2309                /* fall through to exit */
2310        default:
2311                break;
2312        }
2313
2314        if (rcb->ioasa)
2315                *rcb->ioasa = cmd->sa;
2316out:
2317        atomic_dec(&afu->cmds_active);
2318        mutex_unlock(&sync_active);
2319        kfree(buf);
2320        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2321        return rc;
2322}
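
    /*
     * The kmalloc()/PTR_ALIGN() pairing in send_afu_cmd() is the standard
     * idiom for obtaining storage aligned beyond the allocator's default
     * guarantee: over-allocate by align - 1 bytes and round the pointer up.
     *
     *	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
     *	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
     *	...
     *	kfree(buf);	(free the original pointer, never the aligned one)
     */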
2323
2324/**
2325 * cxlflash_afu_sync() - builds and sends an AFU sync command
2326 * @afu:        AFU associated with the host.
2327 * @ctx:        Identifies context requesting sync.
2328 * @res:        Identifies resource requesting sync.
2329 * @mode:       Type of sync to issue (lightweight, heavyweight, global).
2330 *
2331 * AFU sync operations are only necessary and allowed when the device is
2332 * operating normally. When not operating normally, sync requests can occur as
2333 * part of cleaning up resources associated with an adapter prior to removal.
2334 * In this scenario, these requests are simply ignored (safe due to the AFU
2335 * going away).
2336 *
2337 * Return:
2338 *      0 on success, -errno on failure
2339 */
2340int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2341{
2342        struct cxlflash_cfg *cfg = afu->parent;
2343        struct device *dev = &cfg->dev->dev;
2344        struct sisl_ioarcb rcb = { 0 };
2345
2346        dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2347                __func__, afu, ctx, res, mode);
2348
2349        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2350        rcb.msi = SISL_MSI_RRQ_UPDATED;
2351        rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2352
2353        rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2354        rcb.cdb[1] = mode;
2355        put_unaligned_be16(ctx, &rcb.cdb[2]);
2356        put_unaligned_be32(res, &rcb.cdb[4]);
2357
2358        return send_afu_cmd(afu, &rcb);
2359}
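
    /*
     * Resulting CDB layout for the sync built above (multi-byte fields are
     * big endian, per the put_unaligned_be*() calls):
     *
     *	cdb[0]    SISL_AFU_CMD_SYNC
     *	cdb[1]    mode (lightweight, heavyweight or global)
     *	cdb[2-3]  ctx, big-endian 16-bit context handle
     *	cdb[4-7]  res, big-endian 32-bit resource handle
     */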
2360
2361/**
2362 * cxlflash_eh_abort_handler() - abort a SCSI command
2363 * @scp:        SCSI command to abort.
2364 *
2365 * CXL Flash devices do not support aborting individual commands. Instead,
2366 * reset the context per the SISLite specification, flushing any pending
2367 * commands in the hardware queue before the reset.
2368 *
2369 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2370 */
2371static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2372{
2373        int rc = FAILED;
2374        struct Scsi_Host *host = scp->device->host;
2375        struct cxlflash_cfg *cfg = shost_priv(host);
2376        struct afu_cmd *cmd = sc_to_afuc(scp);
2377        struct device *dev = &cfg->dev->dev;
2378        struct afu *afu = cfg->afu;
2379        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2380
2381        dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2382                "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2383                scp->device->channel, scp->device->id, scp->device->lun,
2384                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2385                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2386                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2387                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2388
2389        /* When the state is not normal, another reset/reload is in progress.
2390         * Return FAILED and the mid-layer will invoke the host reset handler.
2391         */
2392        if (cfg->state != STATE_NORMAL) {
2393                dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2394                        __func__, cfg->state);
2395                goto out;
2396        }
2397
2398        rc = afu->context_reset(hwq);
2399        if (unlikely(rc))
2400                goto out;
2401
2402        rc = SUCCESS;
2403
2404out:
2405        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2406        return rc;
2407}
2408
2409/**
2410 * cxlflash_eh_device_reset_handler() - reset a single LUN
2411 * @scp:        SCSI command to send.
2412 *
2413 * Return:
2414 *      SUCCESS as defined in scsi/scsi.h
2415 *      FAILED as defined in scsi/scsi.h
2416 */
2417static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2418{
2419        int rc = SUCCESS;
2420        struct scsi_device *sdev = scp->device;
2421        struct Scsi_Host *host = sdev->host;
2422        struct cxlflash_cfg *cfg = shost_priv(host);
2423        struct device *dev = &cfg->dev->dev;
2424        int rcr = 0;
2425
2426        dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2427                host->host_no, sdev->channel, sdev->id, sdev->lun);
2428retry:
2429        switch (cfg->state) {
2430        case STATE_NORMAL:
2431                rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2432                if (unlikely(rcr))
2433                        rc = FAILED;
2434                break;
2435        case STATE_RESET:
2436                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2437                goto retry;
2438        default:
2439                rc = FAILED;
2440                break;
2441        }
2442
2443        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2444        return rc;
2445}
2446
2447/**
2448 * cxlflash_eh_host_reset_handler() - reset the host adapter
2449 * @scp:        SCSI command from stack identifying host.
2450 *
2451 * Following a reset, the state is evaluated again in case an EEH occurred
2452 * during the reset. In such a scenario, the host reset will either yield
2453 * until the EEH recovery is complete or return success or failure based
2454 * upon the current device state.
2455 *
2456 * Return:
2457 *      SUCCESS as defined in scsi/scsi.h
2458 *      FAILED as defined in scsi/scsi.h
2459 */
2460static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2461{
2462        int rc = SUCCESS;
2463        int rcr = 0;
2464        struct Scsi_Host *host = scp->device->host;
2465        struct cxlflash_cfg *cfg = shost_priv(host);
2466        struct device *dev = &cfg->dev->dev;
2467
2468        dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2469
2470        switch (cfg->state) {
2471        case STATE_NORMAL:
2472                cfg->state = STATE_RESET;
2473                drain_ioctls(cfg);
2474                cxlflash_mark_contexts_error(cfg);
2475                rcr = afu_reset(cfg);
2476                if (rcr) {
2477                        rc = FAILED;
2478                        cfg->state = STATE_FAILTERM;
2479                } else
2480                        cfg->state = STATE_NORMAL;
2481                wake_up_all(&cfg->reset_waitq);
2482                ssleep(1);
2483                /* fall through */
2484        case STATE_RESET:
2485                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2486                if (cfg->state == STATE_NORMAL)
2487                        break;
2488                /* fall through */
2489        default:
2490                rc = FAILED;
2491                break;
2492        }
2493
2494        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2495        return rc;
2496}
2497
2498/**
2499 * cxlflash_change_queue_depth() - change the queue depth for the device
2500 * @sdev:       SCSI device destined for queue depth change.
2501 * @qdepth:     Requested queue depth value to set.
2502 *
2503 * The requested queue depth is capped to the maximum supported value.
2504 *
2505 * Return: The actual queue depth set.
2506 */
2507static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2508{
2510        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2511                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2512
2513        scsi_change_queue_depth(sdev, qdepth);
2514        return sdev->queue_depth;
2515}
2516
2517/**
2518 * cxlflash_show_port_status() - queries and presents the current port status
2519 * @port:       Desired port for status reporting.
2520 * @cfg:        Internal structure associated with the host.
2521 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2522 *
2523 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2524 */
2525static ssize_t cxlflash_show_port_status(u32 port,
2526                                         struct cxlflash_cfg *cfg,
2527                                         char *buf)
2528{
2529        struct device *dev = &cfg->dev->dev;
2530        char *disp_status;
2531        u64 status;
2532        __be64 __iomem *fc_port_regs;
2533
2534        WARN_ON(port >= MAX_FC_PORTS);
2535
2536        if (port >= cfg->num_fc_ports) {
2537                dev_info(dev, "%s: Port %d not supported on this card.\n",
2538                        __func__, port);
2539                return -EINVAL;
2540        }
2541
2542        fc_port_regs = get_fc_port_regs(cfg, port);
2543        status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2544        status &= FC_MTIP_STATUS_MASK;
2545
2546        if (status == FC_MTIP_STATUS_ONLINE)
2547                disp_status = "online";
2548        else if (status == FC_MTIP_STATUS_OFFLINE)
2549                disp_status = "offline";
2550        else
2551                disp_status = "unknown";
2552
2553        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2554}
2555
2556/**
2557 * port0_show() - queries and presents the current status of port 0
2558 * @dev:        Generic device associated with the host owning the port.
2559 * @attr:       Device attribute representing the port.
2560 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2561 *
2562 * Return: The size of the ASCII string returned in @buf.
2563 */
2564static ssize_t port0_show(struct device *dev,
2565                          struct device_attribute *attr,
2566                          char *buf)
2567{
2568        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2569
2570        return cxlflash_show_port_status(0, cfg, buf);
2571}
2572
2573/**
2574 * port1_show() - queries and presents the current status of port 1
2575 * @dev:        Generic device associated with the host owning the port.
2576 * @attr:       Device attribute representing the port.
2577 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2578 *
2579 * Return: The size of the ASCII string returned in @buf.
2580 */
2581static ssize_t port1_show(struct device *dev,
2582                          struct device_attribute *attr,
2583                          char *buf)
2584{
2585        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2586
2587        return cxlflash_show_port_status(1, cfg, buf);
2588}
2589
2590/**
2591 * port2_show() - queries and presents the current status of port 2
2592 * @dev:        Generic device associated with the host owning the port.
2593 * @attr:       Device attribute representing the port.
2594 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2595 *
2596 * Return: The size of the ASCII string returned in @buf.
2597 */
2598static ssize_t port2_show(struct device *dev,
2599                          struct device_attribute *attr,
2600                          char *buf)
2601{
2602        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2603
2604        return cxlflash_show_port_status(2, cfg, buf);
2605}
2606
2607/**
2608 * port3_show() - queries and presents the current status of port 3
2609 * @dev:        Generic device associated with the host owning the port.
2610 * @attr:       Device attribute representing the port.
2611 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2612 *
2613 * Return: The size of the ASCII string returned in @buf.
2614 */
2615static ssize_t port3_show(struct device *dev,
2616                          struct device_attribute *attr,
2617                          char *buf)
2618{
2619        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2620
2621        return cxlflash_show_port_status(3, cfg, buf);
2622}
2623
2624/**
2625 * lun_mode_show() - presents the current LUN mode of the host
2626 * @dev:        Generic device associated with the host.
2627 * @attr:       Device attribute representing the LUN mode.
2628 * @buf:        Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2629 *
2630 * Return: The size of the ASCII string returned in @buf.
2631 */
2632static ssize_t lun_mode_show(struct device *dev,
2633                             struct device_attribute *attr, char *buf)
2634{
2635        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2636        struct afu *afu = cfg->afu;
2637
2638        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2639}
2640
2641/**
2642 * lun_mode_store() - sets the LUN mode of the host
2643 * @dev:        Generic device associated with the host.
2644 * @attr:       Device attribute representing the LUN mode.
2645 * @buf:        Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2646 * @count:      Length of data residing in @buf.
2647 *
2648 * The CXL Flash AFU supports a dummy LUN mode where the external
2649 * links and storage are not required. Space on the FPGA is used
2650 * to create 1 or 2 small LUNs which are presented to the system
2651 * as if they were a normal storage device. This feature is useful
2652 * during development and also provides manufacturing with a way
2653 * to test the AFU without an actual device.
2654 *
2655 * 0 = external LUN[s] (default)
2656 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2657 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2658 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2659 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2660 *
2661 * Return: The number of bytes consumed from @buf (count).
2662 */
2663static ssize_t lun_mode_store(struct device *dev,
2664                              struct device_attribute *attr,
2665                              const char *buf, size_t count)
2666{
2667        struct Scsi_Host *shost = class_to_shost(dev);
2668        struct cxlflash_cfg *cfg = shost_priv(shost);
2669        struct afu *afu = cfg->afu;
2670        int rc;
2671        u32 lun_mode;
2672
2673        rc = kstrtouint(buf, 10, &lun_mode);
2674        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2675                afu->internal_lun = lun_mode;
2676
2677                /*
2678                 * When configured for internal LUN, there is only one channel,
2679                 * channel number 0, else there will be one less than the number
2680                 * of fc ports for this card.
2681                 */
2682                if (afu->internal_lun)
2683                        shost->max_channel = 0;
2684                else
2685                        shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2686
2687                afu_reset(cfg);
2688                scsi_scan_host(cfg->host);
2689        }
2690
2691        return count;
2692}
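
    /*
     * Example use from userspace, assuming the attribute is published under
     * the SCSI host class (the host number is system-dependent):
     *
     *	echo 1 > /sys/class/scsi_host/host0/lun_mode   (1 x 64K, 512B LUN)
     *	echo 0 > /sys/class/scsi_host/host0/lun_mode   (external LUNs again)
     *
     * Each accepted write triggers the afu_reset()/scsi_scan_host()
     * sequence above.
     */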
2693
2694/**
2695 * ioctl_version_show() - presents the current ioctl version of the host
2696 * @dev:        Generic device associated with the host.
2697 * @attr:       Device attribute representing the ioctl version.
2698 * @buf:        Buffer of length PAGE_SIZE to report back the ioctl version.
2699 *
2700 * Return: The size of the ASCII string returned in @buf.
2701 */
2702static ssize_t ioctl_version_show(struct device *dev,
2703                                  struct device_attribute *attr, char *buf)
2704{
2705        ssize_t bytes = 0;
2706
2707        bytes = scnprintf(buf, PAGE_SIZE,
2708                          "disk: %u\n", DK_CXLFLASH_VERSION_0);
2709        bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2710                           "host: %u\n", HT_CXLFLASH_VERSION_0);
2711
2712        return bytes;
2713}
2714
2715/**
2716 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2717 * @port:       Desired port for status reporting.
2718 * @cfg:        Internal structure associated with the host.
2719 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2720 *
2721 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2722 */
2723static ssize_t cxlflash_show_port_lun_table(u32 port,
2724                                            struct cxlflash_cfg *cfg,
2725                                            char *buf)
2726{
2727        struct device *dev = &cfg->dev->dev;
2728        __be64 __iomem *fc_port_luns;
2729        int i;
2730        ssize_t bytes = 0;
2731
2732        WARN_ON(port >= MAX_FC_PORTS);
2733
2734        if (port >= cfg->num_fc_ports) {
2735                dev_info(dev, "%s: Port %d not supported on this card.\n",
2736                        __func__, port);
2737                return -EINVAL;
2738        }
2739
2740        fc_port_luns = get_fc_port_luns(cfg, port);
2741
2742        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2743                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2744                                   "%03d: %016llx\n",
2745                                   i, readq_be(&fc_port_luns[i]));
2746        return bytes;
2747}
2748
2749/**
2750 * port0_lun_table_show() - presents the current LUN table of port 0
2751 * @dev:        Generic device associated with the host owning the port.
2752 * @attr:       Device attribute representing the port.
2753 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2754 *
2755 * Return: The size of the ASCII string returned in @buf.
2756 */
2757static ssize_t port0_lun_table_show(struct device *dev,
2758                                    struct device_attribute *attr,
2759                                    char *buf)
2760{
2761        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2762
2763        return cxlflash_show_port_lun_table(0, cfg, buf);
2764}
2765
2766/**
2767 * port1_lun_table_show() - presents the current LUN table of port 1
2768 * @dev:        Generic device associated with the host owning the port.
2769 * @attr:       Device attribute representing the port.
2770 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2771 *
2772 * Return: The size of the ASCII string returned in @buf.
2773 */
2774static ssize_t port1_lun_table_show(struct device *dev,
2775                                    struct device_attribute *attr,
2776                                    char *buf)
2777{
2778        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2779
2780        return cxlflash_show_port_lun_table(1, cfg, buf);
2781}
2782
2783/**
2784 * port2_lun_table_show() - presents the current LUN table of port 2
2785 * @dev:        Generic device associated with the host owning the port.
2786 * @attr:       Device attribute representing the port.
2787 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2788 *
2789 * Return: The size of the ASCII string returned in @buf.
2790 */
2791static ssize_t port2_lun_table_show(struct device *dev,
2792                                    struct device_attribute *attr,
2793                                    char *buf)
2794{
2795        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2796
2797        return cxlflash_show_port_lun_table(2, cfg, buf);
2798}
2799
2800/**
2801 * port3_lun_table_show() - presents the current LUN table of port 3
2802 * @dev:        Generic device associated with the host owning the port.
2803 * @attr:       Device attribute representing the port.
2804 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2805 *
2806 * Return: The size of the ASCII string returned in @buf.
2807 */
2808static ssize_t port3_lun_table_show(struct device *dev,
2809                                    struct device_attribute *attr,
2810                                    char *buf)
2811{
2812        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2813
2814        return cxlflash_show_port_lun_table(3, cfg, buf);
2815}
2816
2817/**
2818 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2819 * @dev:        Generic device associated with the host.
2820 * @attr:       Device attribute representing the IRQ poll weight.
2821 * @buf:        Buffer of length PAGE_SIZE to report back the current IRQ poll
2822 *              weight in ASCII.
2823 *
2824 * An IRQ poll weight of 0 indicates polling is disabled.
2825 *
2826 * Return: The size of the ASCII string returned in @buf.
2827 */
2828static ssize_t irqpoll_weight_show(struct device *dev,
2829                                   struct device_attribute *attr, char *buf)
2830{
2831        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2832        struct afu *afu = cfg->afu;
2833
2834        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2835}
2836
2837/**
2838 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2839 * @dev:        Generic device associated with the host.
2840 * @attr:       Device attribute representing the IRQ poll weight.
2841 * @buf:        Buffer of length PAGE_SIZE containing the desired IRQ poll
2842 *              weight in ASCII.
2843 * @count:      Length of data residing in @buf.
2844 *
2845 * An IRQ poll weight of 0 indicates polling is disabled.
2846 *
2847 * Return: @count on success, -EINVAL on failure.
2848 */
2849static ssize_t irqpoll_weight_store(struct device *dev,
2850                                    struct device_attribute *attr,
2851                                    const char *buf, size_t count)
2852{
2853        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2854        struct device *cfgdev = &cfg->dev->dev;
2855        struct afu *afu = cfg->afu;
2856        struct hwq *hwq;
2857        u32 weight;
2858        int rc, i;
2859
2860        rc = kstrtouint(buf, 10, &weight);
2861        if (rc)
2862                return -EINVAL;
2863
2864        if (weight > 256) {
2865                dev_info(cfgdev,
2866                         "Invalid IRQ poll weight. It must be 256 or less.\n");
2867                return -EINVAL;
2868        }
2869
2870        if (weight == afu->irqpoll_weight) {
2871                dev_info(cfgdev,
2872                         "Specified IRQ poll weight matches the current weight.\n");
2873                return -EINVAL;
2874        }
2875
2876        if (afu_is_irqpoll_enabled(afu)) {
2877                for (i = 0; i < afu->num_hwqs; i++) {
2878                        hwq = get_hwq(afu, i);
2879
2880                        irq_poll_disable(&hwq->irqpoll);
2881                }
2882        }
2883
2884        afu->irqpoll_weight = weight;
2885
2886        if (weight > 0) {
2887                for (i = 0; i < afu->num_hwqs; i++) {
2888                        hwq = get_hwq(afu, i);
2889
2890                        irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2891                }
2892        }
2893
2894        return count;
2895}
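
    /*
     * The store path above quiesces polling before changing the weight:
     * existing irq_poll instances are disabled, the new weight is recorded,
     * and polling is re-initialized only for a nonzero weight. For example
     * (host number is system-dependent):
     *
     *	echo 64 > /sys/class/scsi_host/host0/irqpoll_weight   (enable, 64)
     *	echo 0  > /sys/class/scsi_host/host0/irqpoll_weight   (disable)
     */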
2896
2897/**
2898 * num_hwqs_show() - presents the number of hardware queues for the host
2899 * @dev:        Generic device associated with the host.
2900 * @attr:       Device attribute representing the number of hardware queues.
2901 * @buf:        Buffer of length PAGE_SIZE to report back the number of hardware
2902 *              queues in ASCII.
2903 *
2904 * Return: The size of the ASCII string returned in @buf.
2905 */
2906static ssize_t num_hwqs_show(struct device *dev,
2907                             struct device_attribute *attr, char *buf)
2908{
2909        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2910        struct afu *afu = cfg->afu;
2911
2912        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2913}
2914
2915/**
2916 * num_hwqs_store() - sets the number of hardware queues for the host
2917 * @dev:        Generic device associated with the host.
2918 * @attr:       Device attribute representing the number of hardware queues.
2919 * @buf:        Buffer of length PAGE_SIZE containing the number of hardware
2920 *              queues in ASCII.
2921 * @count:      Length of data residing in @buf.
2922 *
2923 * n > 0: num_hwqs = n
2924 * n = 0: num_hwqs = num_online_cpus()
2925 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2926 *
2927 * Return: @count on success, -EINVAL on failure.
2928 */
2929static ssize_t num_hwqs_store(struct device *dev,
2930                              struct device_attribute *attr,
2931                              const char *buf, size_t count)
2932{
2933        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2934        struct afu *afu = cfg->afu;
2935        int rc;
2936        int nhwqs, num_hwqs;
2937
2938        rc = kstrtoint(buf, 10, &nhwqs);
2939        if (rc)
2940                return -EINVAL;
2941
2942        if (nhwqs >= 1)
2943                num_hwqs = nhwqs;
2944        else if (nhwqs == 0)
2945                num_hwqs = num_online_cpus();
2946        else
2947                num_hwqs = num_online_cpus() / abs(nhwqs);
2948
2949        afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
2950        WARN_ON_ONCE(afu->desired_hwqs == 0);
2951
2952retry:
2953        switch (cfg->state) {
2954        case STATE_NORMAL:
2955                cfg->state = STATE_RESET;
2956                drain_ioctls(cfg);
2957                cxlflash_mark_contexts_error(cfg);
2958                rc = afu_reset(cfg);
2959                if (rc)
2960                        cfg->state = STATE_FAILTERM;
2961                else
2962                        cfg->state = STATE_NORMAL;
2963                wake_up_all(&cfg->reset_waitq);
2964                break;
2965        case STATE_RESET:
2966                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2967                if (cfg->state == STATE_NORMAL)
2968                        goto retry;
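                    /* else, fall through */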
2969        default:
2970                /* Ideally should not happen */
2971                dev_err(dev, "%s: Device is not ready, state=%d\n",
2972                        __func__, cfg->state);
2973                break;
2974        }
2975
2976        return count;
2977}
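
/*
 * Worked example of the mapping above, for illustration: on a system
 * with 8 online CPUs, writing "4" yields num_hwqs = 4, writing "0"
 * yields num_hwqs = 8, and writing "-2" yields num_hwqs = 8 / 2 = 4.
 * The result is clamped to CXLFLASH_MAX_HWQS before the AFU is reset
 * to realize the new queue count.
 */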
2978
2979static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
2980
2981/**
2982 * hwq_mode_show() - presents the HWQ steering mode for the host
2983 * @dev:        Generic device associated with the host.
2984 * @attr:       Device attribute representing the HWQ steering mode.
2985 * @buf:        Buffer of length PAGE_SIZE to report back the HWQ steering mode
2986 *              as a character string.
2987 *
2988 * Return: The size of the ASCII string returned in @buf.
2989 */
2990static ssize_t hwq_mode_show(struct device *dev,
2991                             struct device_attribute *attr, char *buf)
2992{
2993        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2994        struct afu *afu = cfg->afu;
2995
2996        return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
2997}
2998
2999/**
3000 * hwq_mode_store() - sets the HWQ steering mode for the host
3001 * @dev:        Generic device associated with the host.
3002 * @attr:       Device attribute representing the HWQ steering mode.
3003 * @buf:        Buffer of length PAGE_SIZE containing the HWQ steering mode
3004 *              as a character string.
3005 * @count:      Length of data residing in @buf.
3006 *
3007 * rr = Round-Robin
3008 * tag = Block MQ Tagging
3009 * cpu = CPU Affinity
3010 *
3011 * Return: @count on success, -errno on failure.
3012 */
3013static ssize_t hwq_mode_store(struct device *dev,
3014                              struct device_attribute *attr,
3015                              const char *buf, size_t count)
3016{
3017        struct Scsi_Host *shost = class_to_shost(dev);
3018        struct cxlflash_cfg *cfg = shost_priv(shost);
3019        struct device *cfgdev = &cfg->dev->dev;
3020        struct afu *afu = cfg->afu;
3021        int i;
3022        u32 mode = MAX_HWQ_MODE;
3023
3024        for (i = 0; i < MAX_HWQ_MODE; i++) {
3025                if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3026                        mode = i;
3027                        break;
3028                }
3029        }
3030
3031        if (mode >= MAX_HWQ_MODE) {
3032                dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3033                return -EINVAL;
3034        }
3035
3036        if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
3037                dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
3038                         "HWQ steering mode.\n");
3039                return -EINVAL;
3040        }
3041
3042        afu->hwq_mode = mode;
3043
3044        return count;
3045}
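
/*
 * Illustrative usage sketch (hypothetical host number): the steering
 * mode is selected by writing one of the names from hwq_mode_name[],
 * e.g.:
 *
 *	echo cpu > /sys/class/scsi_host/host<N>/hwq_mode
 *
 * Note that "tag" is rejected when the host is not using SCSI-MQ.
 */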
3046
3047/**
3048 * mode_show() - presents the current mode of the device
3049 * @dev:        Generic device associated with the device.
3050 * @attr:       Device attribute representing the device mode.
3051 * @buf:        Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3052 *
3053 * Return: The size of the ASCII string returned in @buf.
3054 */
3055static ssize_t mode_show(struct device *dev,
3056                         struct device_attribute *attr, char *buf)
3057{
3058        struct scsi_device *sdev = to_scsi_device(dev);
3059
3060        return scnprintf(buf, PAGE_SIZE, "%s\n",
3061                         sdev->hostdata ? "superpipe" : "legacy");
3062}
3063
3064/*
3065 * Host attributes
3066 */
3067static DEVICE_ATTR_RO(port0);
3068static DEVICE_ATTR_RO(port1);
3069static DEVICE_ATTR_RO(port2);
3070static DEVICE_ATTR_RO(port3);
3071static DEVICE_ATTR_RW(lun_mode);
3072static DEVICE_ATTR_RO(ioctl_version);
3073static DEVICE_ATTR_RO(port0_lun_table);
3074static DEVICE_ATTR_RO(port1_lun_table);
3075static DEVICE_ATTR_RO(port2_lun_table);
3076static DEVICE_ATTR_RO(port3_lun_table);
3077static DEVICE_ATTR_RW(irqpoll_weight);
3078static DEVICE_ATTR_RW(num_hwqs);
3079static DEVICE_ATTR_RW(hwq_mode);
3080
3081static struct device_attribute *cxlflash_host_attrs[] = {
3082        &dev_attr_port0,
3083        &dev_attr_port1,
3084        &dev_attr_port2,
3085        &dev_attr_port3,
3086        &dev_attr_lun_mode,
3087        &dev_attr_ioctl_version,
3088        &dev_attr_port0_lun_table,
3089        &dev_attr_port1_lun_table,
3090        &dev_attr_port2_lun_table,
3091        &dev_attr_port3_lun_table,
3092        &dev_attr_irqpoll_weight,
3093        &dev_attr_num_hwqs,
3094        &dev_attr_hwq_mode,
3095        NULL
3096};
3097
3098/*
3099 * Device attributes
3100 */
3101static DEVICE_ATTR_RO(mode);
3102
3103static struct device_attribute *cxlflash_dev_attrs[] = {
3104        &dev_attr_mode,
3105        NULL
3106};
3107
3108/*
3109 * Host template
3110 */
3111static struct scsi_host_template driver_template = {
3112        .module = THIS_MODULE,
3113        .name = CXLFLASH_ADAPTER_NAME,
3114        .info = cxlflash_driver_info,
3115        .ioctl = cxlflash_ioctl,
3116        .proc_name = CXLFLASH_NAME,
3117        .queuecommand = cxlflash_queuecommand,
3118        .eh_abort_handler = cxlflash_eh_abort_handler,
3119        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3120        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3121        .change_queue_depth = cxlflash_change_queue_depth,
3122        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3123        .can_queue = CXLFLASH_MAX_CMDS,
3124        .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3125        .this_id = -1,
3126        .sg_tablesize = 1,      /* No scatter gather support */
3127        .max_sectors = CXLFLASH_MAX_SECTORS,
3128        .use_clustering = ENABLE_CLUSTERING,
3129        .shost_attrs = cxlflash_host_attrs,
3130        .sdev_attrs = cxlflash_dev_attrs,
3131};
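
/*
 * A note on cmd_size above: the extra __alignof__(struct afu_cmd) - 1
 * bytes guarantee that a properly aligned struct afu_cmd fits within
 * the per-command private area wherever the midlayer places it. A
 * minimal sketch of the retrieval idiom (illustrative only; the
 * driver's actual helper lives in a shared header):
 *
 *	static inline struct afu_cmd *example_sc_to_afuc(struct scsi_cmnd *sc)
 *	{
 *		return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
 *	}
 */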
3132
3133/*
3134 * Device dependent values
3135 */
3136static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3137                                        CXLFLASH_WWPN_VPD_REQUIRED };
3138static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3139                                        CXLFLASH_NOTIFY_SHUTDOWN };
3140static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3141                                        CXLFLASH_NOTIFY_SHUTDOWN };
3142
3143/*
3144 * PCI device binding table
3145 */
3146static struct pci_device_id cxlflash_pci_table[] = {
3147        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3148         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3149        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3150         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3151        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3152         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3153        {}
3154};
3155
3156MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3157
3158/**
3159 * cxlflash_worker_thread() - work thread handler for the AFU
3160 * @work:       Work structure contained within cxlflash associated with host.
3161 *
3162 * Handles the following events:
3163 * - Link reset which cannot be performed in interrupt context because
3164 *   it can block for up to a few seconds
3165 * - Rescan the host
3166 */
3167static void cxlflash_worker_thread(struct work_struct *work)
3168{
3169        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3170                                                work_q);
3171        struct afu *afu = cfg->afu;
3172        struct device *dev = &cfg->dev->dev;
3173        __be64 __iomem *fc_port_regs;
3174        int port;
3175        ulong lock_flags;
3176
3177        /* Avoid MMIO if the device has failed */
3178
3179        if (cfg->state != STATE_NORMAL)
3180                return;
3181
3182        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3183
3184        if (cfg->lr_state == LINK_RESET_REQUIRED) {
3185                port = cfg->lr_port;
3186                if (port < 0)
3187                        dev_err(dev, "%s: invalid port index %d\n",
3188                                __func__, port);
3189                else {
3190                        spin_unlock_irqrestore(cfg->host->host_lock,
3191                                               lock_flags);
3192
3193                        /* The reset can block... */
3194                        fc_port_regs = get_fc_port_regs(cfg, port);
3195                        afu_link_reset(afu, port, fc_port_regs);
3196                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3197                }
3198
3199                cfg->lr_state = LINK_RESET_COMPLETE;
3200        }
3201
3202        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3203
3204        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3205                scsi_scan_host(cfg->host);
3206}
3207
3208/**
3209 * cxlflash_chr_open() - character device open handler
3210 * @inode:      Device inode associated with this character device.
3211 * @file:       File pointer for this device.
3212 *
3213 * Only users with admin privileges are allowed to open the character device.
3214 *
3215 * Return: 0 on success, -errno on failure
3216 */
3217static int cxlflash_chr_open(struct inode *inode, struct file *file)
3218{
3219        struct cxlflash_cfg *cfg;
3220
3221        if (!capable(CAP_SYS_ADMIN))
3222                return -EACCES;
3223
3224        cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3225        file->private_data = cfg;
3226
3227        return 0;
3228}
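
/*
 * Illustrative userspace sketch, not part of this file: opening the
 * character device requires CAP_SYS_ADMIN, so unprivileged callers
 * receive EACCES. The path follows cxlflash_devnode() below; adapter
 * index 0 is assumed for the example.
 *
 *	int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *	if (fd < 0)
 *		perror("open");		// EACCES without CAP_SYS_ADMIN
 */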
3229
3230/**
3231 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3232 * @cmd:        The host ioctl command to decode.
3233 *
3234 * Return: A string identifying the decoded host ioctl.
3235 */
3236static char *decode_hioctl(int cmd)
3237{
3238        switch (cmd) {
3239        case HT_CXLFLASH_LUN_PROVISION:
3240                return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3241        }
3242
3243        return "UNKNOWN";
3244}
3245
3246/**
3247 * cxlflash_lun_provision() - host LUN provisioning handler
3248 * @cfg:        Internal structure associated with the host.
3249 * @lunprov:    Kernel copy of userspace ioctl data structure.
3250 *
3251 * Return: 0 on success, -errno on failure
3252 */
3253static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3254                                  struct ht_cxlflash_lun_provision *lunprov)
3255{
3256        struct afu *afu = cfg->afu;
3257        struct device *dev = &cfg->dev->dev;
3258        struct sisl_ioarcb rcb;
3259        struct sisl_ioasa asa;
3260        __be64 __iomem *fc_port_regs;
3261        u16 port = lunprov->port;
3262        u16 scmd = lunprov->hdr.subcmd;
3263        u16 type;
3264        u64 reg;
3265        u64 size;
3266        u64 lun_id;
3267        int rc = 0;
3268
3269        if (!afu_is_lun_provision(afu)) {
3270                rc = -ENOTSUPP;
3271                goto out;
3272        }
3273
3274        if (port >= cfg->num_fc_ports) {
3275                rc = -EINVAL;
3276                goto out;
3277        }
3278
3279        switch (scmd) {
3280        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3281                type = SISL_AFU_LUN_PROVISION_CREATE;
3282                size = lunprov->size;
3283                lun_id = 0;
3284                break;
3285        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3286                type = SISL_AFU_LUN_PROVISION_DELETE;
3287                size = 0;
3288                lun_id = lunprov->lun_id;
3289                break;
3290        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3291                fc_port_regs = get_fc_port_regs(cfg, port);
3292
3293                reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3294                lunprov->max_num_luns = reg;
3295                reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3296                lunprov->cur_num_luns = reg;
3297                reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3298                lunprov->max_cap_port = reg;
3299                reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3300                lunprov->cur_cap_port = reg;
3301
3302                goto out;
3303        default:
3304                rc = -EINVAL;
3305                goto out;
3306        }
3307
3308        memset(&rcb, 0, sizeof(rcb));
3309        memset(&asa, 0, sizeof(asa));
3310        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3311        rcb.lun_id = lun_id;
3312        rcb.msi = SISL_MSI_RRQ_UPDATED;
3313        rcb.timeout = MC_LUN_PROV_TIMEOUT;
3314        rcb.ioasa = &asa;
3315
3316        rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3317        rcb.cdb[1] = type;
3318        rcb.cdb[2] = port;
3319        put_unaligned_be64(size, &rcb.cdb[8]);
3320
3321        rc = send_afu_cmd(afu, &rcb);
3322        if (rc) {
3323                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3324                        __func__, rc, asa.ioasc, asa.afu_extra);
3325                goto out;
3326        }
3327
3328        if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3329                lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3330                memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3331        }
3332out:
3333        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3334        return rc;
3335}
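
/*
 * Illustrative userspace sketch of a create request, not part of this
 * file; the port number and size value are assumptions made for the
 * example. On success the AFU-assigned LUN ID and WWID are passed back
 * in the structure.
 *
 *	struct ht_cxlflash_lun_provision lp = { 0 };
 *
 *	lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
 *	lp.port = 0;			// hypothetical FC port index
 *	lp.size = example_size;		// hypothetical size value
 *	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp) == 0)
 *		printf("lun_id=0x%llx\n", (unsigned long long)lp.lun_id);
 */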
3336
3337/**
3338 * cxlflash_afu_debug() - host AFU debug handler
3339 * @cfg:        Internal structure associated with the host.
3340 * @afu_dbg:    Kernel copy of userspace ioctl data structure.
3341 *
3342 * For debug requests requiring a data buffer, always provide an aligned
3343 * (cache line) buffer to the AFU to appease any alignment requirements.
3344 *
3345 * Return: 0 on success, -errno on failure
3346 */
3347static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3348                              struct ht_cxlflash_afu_debug *afu_dbg)
3349{
3350        struct afu *afu = cfg->afu;
3351        struct device *dev = &cfg->dev->dev;
3352        struct sisl_ioarcb rcb;
3353        struct sisl_ioasa asa;
3354        char *buf = NULL;
3355        char *kbuf = NULL;
3356        void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3357        u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3358        u32 ulen = afu_dbg->data_len;
3359        bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3360        int rc = 0;
3361
3362        if (!afu_is_afu_debug(afu)) {
3363                rc = -ENOTSUPP;
3364                goto out;
3365        }
3366
3367        if (ulen) {
3368                req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3369
3370                if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3371                        rc = -EINVAL;
3372                        goto out;
3373                }
3374
3375                buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3376                if (unlikely(!buf)) {
3377                        rc = -ENOMEM;
3378                        goto out;
3379                }
3380
3381                kbuf = PTR_ALIGN(buf, cache_line_size());
3382
3383                if (is_write) {
3384                        req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3385
3386                        if (copy_from_user(kbuf, ubuf, ulen)) {
3387                                rc = -EFAULT;
3388                                goto out;
3389                        }
3390                }
3391        }
3392
3393        memset(&rcb, 0, sizeof(rcb));
3394        memset(&asa, 0, sizeof(asa));
3395
3396        rcb.req_flags = req_flags;
3397        rcb.msi = SISL_MSI_RRQ_UPDATED;
3398        rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3399        rcb.ioasa = &asa;
3400
3401        if (ulen) {
3402                rcb.data_len = ulen;
3403                rcb.data_ea = (uintptr_t)kbuf;
3404        }
3405
3406        rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3407        memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3408               HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3409
3410        rc = send_afu_cmd(afu, &rcb);
3411        if (rc) {
3412                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3413                        __func__, rc, asa.ioasc, asa.afu_extra);
3414                goto out;
3415        }
3416
3417        if (ulen && !is_write) {
3418                if (copy_to_user(ubuf, kbuf, ulen))
3419                        rc = -EFAULT;
3420        }
3421out:
3422        kfree(buf);
3423        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3424        return rc;
3425}
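
/*
 * Worked example of the alignment above, using illustrative numbers:
 * with a 128-byte cache line, a kmalloc() return of 0x2040 gives
 * kbuf = PTR_ALIGN(0x2040, 128) = 0x2080, and the ulen + 127 byte
 * over-allocation guarantees kbuf + ulen stays within the allocation.
 * kfree() is always passed the original pointer, never kbuf.
 */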
3426
3427/**
3428 * cxlflash_chr_ioctl() - character device IOCTL handler
3429 * @file:       File pointer for this device.
3430 * @cmd:        IOCTL command.
3431 * @arg:        Userspace ioctl data structure.
3432 *
3433 * A read/write semaphore is used to implement a 'drain' of currently
3434 * running ioctls. The read semaphore is taken at the beginning of each
3435 * ioctl thread and released upon concluding execution. Additionally the
3436 * semaphore should be released and then reacquired in any ioctl execution
3437 * path which will wait for an event to occur that is outside the scope of
3438 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3439 * a thread simply needs to acquire the write semaphore.
3440 *
3441 * Return: 0 on success, -errno on failure
3442 */
3443static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3444                               unsigned long arg)
3445{
3446        typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3447
3448        struct cxlflash_cfg *cfg = file->private_data;
3449        struct device *dev = &cfg->dev->dev;
3450        char buf[sizeof(union cxlflash_ht_ioctls)];
3451        void __user *uarg = (void __user *)arg;
3452        struct ht_cxlflash_hdr *hdr;
3453        size_t size = 0;
3454        bool known_ioctl = false;
3455        int idx = 0;
3456        int rc = 0;
3457        hioctl do_ioctl = NULL;
3458
3459        static const struct {
3460                size_t size;
3461                hioctl ioctl;
3462        } ioctl_tbl[] = {       /* NOTE: order matters here */
3463        { sizeof(struct ht_cxlflash_lun_provision),
3464                (hioctl)cxlflash_lun_provision },
3465        { sizeof(struct ht_cxlflash_afu_debug),
3466                (hioctl)cxlflash_afu_debug },
3467        };
3468
3469        /* Hold read semaphore so we can drain if needed */
3470        down_read(&cfg->ioctl_rwsem);
3471
3472        dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3473                __func__, cmd, idx, sizeof(ioctl_tbl));
3474
3475        switch (cmd) {
3476        case HT_CXLFLASH_LUN_PROVISION:
3477        case HT_CXLFLASH_AFU_DEBUG:
3478                known_ioctl = true;
3479                idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3480                size = ioctl_tbl[idx].size;
3481                do_ioctl = ioctl_tbl[idx].ioctl;
3482
3483                if (likely(do_ioctl))
3484                        break;
3485
3486                /* fall through */
3487        default:
3488                rc = -EINVAL;
3489                goto out;
3490        }
3491
3492        if (unlikely(copy_from_user(&buf, uarg, size))) {
3493                dev_err(dev, "%s: copy_from_user() fail "
3494                        "size=%lu cmd=%d (%s) uarg=%p\n",
3495                        __func__, size, cmd, decode_hioctl(cmd), uarg);
3496                rc = -EFAULT;
3497                goto out;
3498        }
3499
3500        hdr = (struct ht_cxlflash_hdr *)&buf;
3501        if (hdr->version != HT_CXLFLASH_VERSION_0) {
3502                dev_dbg(dev, "%s: Version %u not supported for %s\n",
3503                        __func__, hdr->version, decode_hioctl(cmd));
3504                rc = -EINVAL;
3505                goto out;
3506        }
3507
3508        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3509                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3510                rc = -EINVAL;
3511                goto out;
3512        }
3513
3514        rc = do_ioctl(cfg, (void *)&buf);
3515        if (likely(!rc))
3516                if (unlikely(copy_to_user(uarg, &buf, size))) {
3517                        dev_err(dev, "%s: copy_to_user() fail "
3518                                "size=%lu cmd=%d (%s) uarg=%p\n",
3519                                __func__, size, cmd, decode_hioctl(cmd), uarg);
3520                        rc = -EFAULT;
3521                }
3522
3523        /* fall through to exit */
3524
3525out:
3526        up_read(&cfg->ioctl_rwsem);
3527        if (unlikely(rc && known_ioctl))
3528                dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3529                        __func__, decode_hioctl(cmd), cmd, rc);
3530        else
3531                dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3532                        __func__, decode_hioctl(cmd), cmd, rc);
3533        return rc;
3534}
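
/*
 * Minimal sketch of the drain pattern described in the kernel-doc
 * above, assuming drain_ioctls() (defined earlier in this file) takes
 * this shape; shown for illustration only:
 *
 *	static void example_drain_ioctls(struct cxlflash_cfg *cfg)
 *	{
 *		down_write(&cfg->ioctl_rwsem);	// waits for readers to finish
 *		up_write(&cfg->ioctl_rwsem);	// new ioctls may proceed
 *	}
 */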
3535
3536/*
3537 * Character device file operations
3538 */
3539static const struct file_operations cxlflash_chr_fops = {
3540        .owner          = THIS_MODULE,
3541        .open           = cxlflash_chr_open,
3542        .unlocked_ioctl = cxlflash_chr_ioctl,
3543        .compat_ioctl   = cxlflash_chr_ioctl,
3544};
3545
3546/**
3547 * init_chrdev() - initialize the character device for the host
3548 * @cfg:        Internal structure associated with the host.
3549 *
3550 * Return: 0 on success, -errno on failure
3551 */
3552static int init_chrdev(struct cxlflash_cfg *cfg)
3553{
3554        struct device *dev = &cfg->dev->dev;
3555        struct device *char_dev;
3556        dev_t devno;
3557        int minor;
3558        int rc = 0;
3559
3560        minor = cxlflash_get_minor();
3561        if (unlikely(minor < 0)) {
3562                dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3563                rc = -ENOSPC;
3564                goto out;
3565        }
3566
3567        devno = MKDEV(cxlflash_major, minor);
3568        cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3569
3570        rc = cdev_add(&cfg->cdev, devno, 1);
3571        if (rc) {
3572                dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3573                goto err1;
3574        }
3575
3576        char_dev = device_create(cxlflash_class, NULL, devno,
3577                                 NULL, "cxlflash%d", minor);
3578        if (IS_ERR(char_dev)) {
3579                rc = PTR_ERR(char_dev);
3580                dev_err(dev, "%s: device_create failed rc=%d\n",
3581                        __func__, rc);
3582                goto err2;
3583        }
3584
3585        cfg->chardev = char_dev;
3586out:
3587        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3588        return rc;
3589err2:
3590        cdev_del(&cfg->cdev);
3591err1:
3592        cxlflash_put_minor(minor);
3593        goto out;
3594}
3595
3596/**
3597 * cxlflash_probe() - PCI entry point to add host
3598 * @pdev:       PCI device associated with the host.
3599 * @dev_id:     PCI device id associated with device.
3600 *
3601 * The device will initially start out in a 'probing' state and
3602 * transition to the 'normal' state at the end of a successful
3603 * probe. Should an EEH event occur during probe, the notification
3604 * thread (error_detected()) will wait until the probe handler
3605 * is nearly complete. At that time, the device will be moved to
3606 * a 'probed' state and the EEH thread woken up to drive the slot
3607 * reset and recovery (device moves to 'normal' state). Meanwhile,
3608 * the probe will be allowed to exit successfully.
3609 *
3610 * Return: 0 on success, -errno on failure
3611 */
3612static int cxlflash_probe(struct pci_dev *pdev,
3613                          const struct pci_device_id *dev_id)
3614{
3615        struct Scsi_Host *host;
3616        struct cxlflash_cfg *cfg = NULL;
3617        struct device *dev = &pdev->dev;
3618        struct dev_dependent_vals *ddv;
3619        int rc = 0;
3620        int k;
3621
3622        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3623                __func__, pdev->irq);
3624
3625        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3626        driver_template.max_sectors = ddv->max_sectors;
3627
3628        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3629        if (!host) {
3630                dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3631                rc = -ENOMEM;
3632                goto out;
3633        }
3634
3635        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3636        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3637        host->unique_id = host->host_no;
3638        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3639
3640        cfg = shost_priv(host);
3641        cfg->host = host;
3642        rc = alloc_mem(cfg);
3643        if (rc) {
3644                dev_err(dev, "%s: alloc_mem failed\n", __func__);
3645                rc = -ENOMEM;
3646                scsi_host_put(cfg->host);
3647                goto out;
3648        }
3649
3650        cfg->init_state = INIT_STATE_NONE;
3651        cfg->dev = pdev;
3652        cfg->ops = &cxlflash_cxl_ops;
3653        cfg->cxl_fops = cxlflash_cxl_fops;
3654
3655        /*
3656         * Promoted LUNs move to the top of the LUN table. The rest stay on
3657         * the bottom half. The bottom half grows from the end (index = 255),
3658         * whereas the top half grows from the beginning (index = 0).
3659         *
3660         * Initialize the last LUN index for all possible ports.
3661         */
3662        cfg->promote_lun_index = 0;
3663
3664        for (k = 0; k < MAX_FC_PORTS; k++)
3665                cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3666
3667        cfg->dev_id = (struct pci_device_id *)dev_id;
3668
3669        init_waitqueue_head(&cfg->tmf_waitq);
3670        init_waitqueue_head(&cfg->reset_waitq);
3671
3672        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3673        cfg->lr_state = LINK_RESET_INVALID;
3674        cfg->lr_port = -1;
3675        spin_lock_init(&cfg->tmf_slock);
3676        mutex_init(&cfg->ctx_tbl_list_mutex);
3677        mutex_init(&cfg->ctx_recovery_mutex);
3678        init_rwsem(&cfg->ioctl_rwsem);
3679        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3680        INIT_LIST_HEAD(&cfg->lluns);
3681
3682        pci_set_drvdata(pdev, cfg);
3683
3684        cfg->afu_cookie = cfg->ops->create_afu(pdev);
3685
3686        rc = init_pci(cfg);
3687        if (rc) {
3688                dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3689                goto out_remove;
3690        }
3691        cfg->init_state = INIT_STATE_PCI;
3692
3693        rc = init_afu(cfg);
3694        if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3695                dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3696                goto out_remove;
3697        }
3698        cfg->init_state = INIT_STATE_AFU;
3699
3700        rc = init_scsi(cfg);
3701        if (rc) {
3702                dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3703                goto out_remove;
3704        }
3705        cfg->init_state = INIT_STATE_SCSI;
3706
3707        rc = init_chrdev(cfg);
3708        if (rc) {
3709                dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3710                goto out_remove;
3711        }
3712        cfg->init_state = INIT_STATE_CDEV;
3713
3714        if (wq_has_sleeper(&cfg->reset_waitq)) {
3715                cfg->state = STATE_PROBED;
3716                wake_up_all(&cfg->reset_waitq);
3717        } else
3718                cfg->state = STATE_NORMAL;
3719out:
3720        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3721        return rc;
3722
3723out_remove:
3724        cxlflash_remove(pdev);
3725        goto out;
3726}
3727
3728/**
3729 * cxlflash_pci_error_detected() - called when a PCI error is detected
3730 * @pdev:       PCI device struct.
3731 * @state:      PCI channel state.
3732 *
3733 * When an EEH occurs during an active reset, wait until the reset is
3734 * complete and then take action based upon the device state.
3735 *
3736 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3737 */
3738static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3739                                                    pci_channel_state_t state)
3740{
3741        int rc = 0;
3742        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3743        struct device *dev = &cfg->dev->dev;
3744
3745        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3746
3747        switch (state) {
3748        case pci_channel_io_frozen:
3749                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3750                                             cfg->state != STATE_PROBING);
3751                if (cfg->state == STATE_FAILTERM)
3752                        return PCI_ERS_RESULT_DISCONNECT;
3753
3754                cfg->state = STATE_RESET;
3755                scsi_block_requests(cfg->host);
3756                drain_ioctls(cfg);
3757                rc = cxlflash_mark_contexts_error(cfg);
3758                if (unlikely(rc))
3759                        dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3760                                __func__, rc);
3761                term_afu(cfg);
3762                return PCI_ERS_RESULT_NEED_RESET;
3763        case pci_channel_io_perm_failure:
3764                cfg->state = STATE_FAILTERM;
3765                wake_up_all(&cfg->reset_waitq);
3766                scsi_unblock_requests(cfg->host);
3767                return PCI_ERS_RESULT_DISCONNECT;
3768        default:
3769                break;
3770        }
3771        return PCI_ERS_RESULT_NEED_RESET;
3772}
3773
3774/**
3775 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3776 * @pdev:       PCI device struct.
3777 *
3778 * This routine is called by the pci error recovery code after the PCI
3779 * slot has been reset, just before we should resume normal operations.
3780 *
3781 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3782 */
3783static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3784{
3785        int rc = 0;
3786        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3787        struct device *dev = &cfg->dev->dev;
3788
3789        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3790
3791        rc = init_afu(cfg);
3792        if (unlikely(rc)) {
3793                dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3794                return PCI_ERS_RESULT_DISCONNECT;
3795        }
3796
3797        return PCI_ERS_RESULT_RECOVERED;
3798}
3799
3800/**
3801 * cxlflash_pci_resume() - called when normal operation can resume
3802 * @pdev:       PCI device struct
3803 */
3804static void cxlflash_pci_resume(struct pci_dev *pdev)
3805{
3806        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3807        struct device *dev = &cfg->dev->dev;
3808
3809        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3810
3811        cfg->state = STATE_NORMAL;
3812        wake_up_all(&cfg->reset_waitq);
3813        scsi_unblock_requests(cfg->host);
3814}
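
/*
 * Taken together, the three callbacks above implement the EEH recovery
 * sequence for this driver: error_detected() quiesces I/O and tears
 * down the AFU (or disconnects on permanent failure), slot_reset()
 * re-initializes the AFU once the slot has been reset, and resume()
 * returns the device to STATE_NORMAL and unblocks SCSI requests.
 */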
3815
3816/**
3817 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3818 * @dev:        Character device.
3819 * @mode:       Mode that can be used to verify access.
3820 *
3821 * Return: Allocated string describing the devtmpfs structure.
3822 */
3823static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3824{
3825        return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3826}
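
/*
 * For example, the adapter whose character device is named "cxlflash0"
 * surfaces in devtmpfs as /dev/cxlflash/cxlflash0 (index assumed for
 * illustration).
 */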
3827
3828/**
3829 * cxlflash_class_init() - create character device class
3830 *
3831 * Return: 0 on success, -errno on failure
3832 */
3833static int cxlflash_class_init(void)
3834{
3835        dev_t devno;
3836        int rc = 0;
3837
3838        rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3839        if (unlikely(rc)) {
3840                pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3841                goto out;
3842        }
3843
3844        cxlflash_major = MAJOR(devno);
3845
3846        cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3847        if (IS_ERR(cxlflash_class)) {
3848                rc = PTR_ERR(cxlflash_class);
3849                pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3850                goto err;
3851        }
3852
3853        cxlflash_class->devnode = cxlflash_devnode;
3854out:
3855        pr_debug("%s: returning rc=%d\n", __func__, rc);
3856        return rc;
3857err:
3858        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3859        goto out;
3860}
3861
3862/**
3863 * cxlflash_class_exit() - destroy character device class
3864 */
3865static void cxlflash_class_exit(void)
3866{
3867        dev_t devno = MKDEV(cxlflash_major, 0);
3868
3869        class_destroy(cxlflash_class);
3870        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3871}
3872
3873static const struct pci_error_handlers cxlflash_err_handler = {
3874        .error_detected = cxlflash_pci_error_detected,
3875        .slot_reset = cxlflash_pci_slot_reset,
3876        .resume = cxlflash_pci_resume,
3877};
3878
3879/*
3880 * PCI device structure
3881 */
3882static struct pci_driver cxlflash_driver = {
3883        .name = CXLFLASH_NAME,
3884        .id_table = cxlflash_pci_table,
3885        .probe = cxlflash_probe,
3886        .remove = cxlflash_remove,
3887        .shutdown = cxlflash_remove,
3888        .err_handler = &cxlflash_err_handler,
3889};
3890
3891/**
3892 * init_cxlflash() - module entry point
3893 *
3894 * Return: 0 on success, -errno on failure
3895 */
3896static int __init init_cxlflash(void)
3897{
3898        int rc;
3899
3900        check_sizes();
3901        cxlflash_list_init();
3902        rc = cxlflash_class_init();
3903        if (unlikely(rc))
3904                goto out;
3905
3906        rc = pci_register_driver(&cxlflash_driver);
3907        if (unlikely(rc))
3908                goto err;
3909out:
3910        pr_debug("%s: returning rc=%d\n", __func__, rc);
3911        return rc;
3912err:
3913        cxlflash_class_exit();
3914        goto out;
3915}
3916
3917/**
3918 * exit_cxlflash() - module exit point
3919 */
3920static void __exit exit_cxlflash(void)
3921{
3922        cxlflash_term_global_luns();
3923        cxlflash_free_errpage();
3924
3925        pci_unregister_driver(&cxlflash_driver);
3926        cxlflash_class_exit();
3927}
3928
3929module_init(init_cxlflash);
3930module_exit(exit_cxlflash);
3931