linux/drivers/scsi/cxlflash/main.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * CXL Flash Device Driver
   4 *
   5 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
   6 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
   7 *
   8 * Copyright (C) 2015 IBM Corporation
   9 */
  10
  11#include <linux/delay.h>
  12#include <linux/list.h>
  13#include <linux/module.h>
  14#include <linux/pci.h>
  15
  16#include <asm/unaligned.h>
  17
  18#include <scsi/scsi_cmnd.h>
  19#include <scsi/scsi_host.h>
  20#include <uapi/scsi/cxlflash_ioctl.h>
  21
  22#include "main.h"
  23#include "sislite.h"
  24#include "common.h"
  25
  26MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
  27MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
  28MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
  29MODULE_LICENSE("GPL");
  30
  31static struct class *cxlflash_class;
  32static u32 cxlflash_major;
  33static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
  34
  35/**
  36 * process_cmd_err() - command error handler
  37 * @cmd:        AFU command that experienced the error.
  38 * @scp:        SCSI command associated with the AFU command in error.
  39 *
  40 * Translates error bits from AFU command to SCSI command results.
  41 */
  42static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
  43{
  44        struct afu *afu = cmd->parent;
  45        struct cxlflash_cfg *cfg = afu->parent;
  46        struct device *dev = &cfg->dev->dev;
  47        struct sisl_ioasa *ioasa;
  48        u32 resid;
  49
  50        ioasa = &(cmd->sa);
  51
  52        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
  53                resid = ioasa->resid;
  54                scsi_set_resid(scp, resid);
  55                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
  56                        __func__, cmd, scp, resid);
  57        }
  58
  59        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
  60                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
  61                        __func__, cmd, scp);
  62                scp->result = (DID_ERROR << 16);
  63        }
  64
  65        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
  66                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
  67                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
  68                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
  69
  70        if (ioasa->rc.scsi_rc) {
  71                /* We have a SCSI status */
  72                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
  73                        memcpy(scp->sense_buffer, ioasa->sense_data,
  74                               SISL_SENSE_DATA_LEN);
  75                        scp->result = ioasa->rc.scsi_rc;
  76                } else
  77                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
  78        }
  79
  80        /*
  81         * We encountered an error. Set scp->result based on nature
  82         * of error.
  83         */
  84        if (ioasa->rc.fc_rc) {
  85                /* We have an FC status */
  86                switch (ioasa->rc.fc_rc) {
  87                case SISL_FC_RC_LINKDOWN:
  88                        scp->result = (DID_REQUEUE << 16);
  89                        break;
  90                case SISL_FC_RC_RESID:
  91                        /* This indicates an FCP resid underrun */
  92                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
  93                                /* If the SISL_RC_FLAGS_OVERRUN flag was set,
  94                 * then we will handle this error elsewhere.
  95                 * If not, we must handle it here.
  96                                 * This is probably an AFU bug.
  97                                 */
  98                                scp->result = (DID_ERROR << 16);
  99                        }
 100                        break;
 101                case SISL_FC_RC_RESIDERR:
 102                        /* Resid mismatch between adapter and device */
 103                case SISL_FC_RC_TGTABORT:
 104                case SISL_FC_RC_ABORTOK:
 105                case SISL_FC_RC_ABORTFAIL:
 106                case SISL_FC_RC_NOLOGI:
 107                case SISL_FC_RC_ABORTPEND:
 108                case SISL_FC_RC_WRABORTPEND:
 109                case SISL_FC_RC_NOEXP:
 110                case SISL_FC_RC_INUSE:
 111                        scp->result = (DID_ERROR << 16);
 112                        break;
 113                }
 114        }
 115
 116        if (ioasa->rc.afu_rc) {
 117                /* We have an AFU error */
 118                switch (ioasa->rc.afu_rc) {
 119                case SISL_AFU_RC_NO_CHANNELS:
 120                        scp->result = (DID_NO_CONNECT << 16);
 121                        break;
 122                case SISL_AFU_RC_DATA_DMA_ERR:
 123                        switch (ioasa->afu_extra) {
 124                        case SISL_AFU_DMA_ERR_PAGE_IN:
 125                                /* Retry */
 126                                scp->result = (DID_IMM_RETRY << 16);
 127                                break;
 128                        case SISL_AFU_DMA_ERR_INVALID_EA:
 129                        default:
 130                                scp->result = (DID_ERROR << 16);
 131                        }
 132                        break;
 133                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
 134                        /* Retry */
 135                        scp->result = (DID_ALLOC_FAILURE << 16);
 136                        break;
 137                default:
 138                        scp->result = (DID_ERROR << 16);
 139                }
 140        }
 141}
 142
 143/**
 144 * cmd_complete() - command completion handler
 145 * @cmd:        AFU command that has completed.
 146 *
 147 * For SCSI commands this routine returns commands that have either
 148 * completed or timed out to the SCSI stack. For internal commands
 149 * (TMF or AFU), this routine simply notifies the originator that the
 150 * command has completed.
 151 */
 152static void cmd_complete(struct afu_cmd *cmd)
 153{
 154        struct scsi_cmnd *scp;
 155        ulong lock_flags;
 156        struct afu *afu = cmd->parent;
 157        struct cxlflash_cfg *cfg = afu->parent;
 158        struct device *dev = &cfg->dev->dev;
 159        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
 160
 161        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 162        list_del(&cmd->list);
 163        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 164
 165        if (cmd->scp) {
 166                scp = cmd->scp;
 167                if (unlikely(cmd->sa.ioasc))
 168                        process_cmd_err(cmd, scp);
 169                else
 170                        scp->result = (DID_OK << 16);
 171
 172                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
 173                                    __func__, scp, scp->result, cmd->sa.ioasc);
 174                scp->scsi_done(scp);
 175        } else if (cmd->cmd_tmf) {
 176                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 177                cfg->tmf_active = false;
 178                wake_up_all_locked(&cfg->tmf_waitq);
 179                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 180        } else
 181                complete(&cmd->cevent);
 182}
 183
 184/**
 185 * flush_pending_cmds() - flush all pending commands on this hardware queue
 186 * @hwq:        Hardware queue to flush.
 187 *
 188 * The hardware send queue lock associated with this hardware queue must be
 189 * held when calling this routine.
 190 */
 191static void flush_pending_cmds(struct hwq *hwq)
 192{
 193        struct cxlflash_cfg *cfg = hwq->afu->parent;
 194        struct afu_cmd *cmd, *tmp;
 195        struct scsi_cmnd *scp;
 196        ulong lock_flags;
 197
 198        list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
 199                /* Bypass command when on a doneq, cmd_complete() will handle */
 200                if (!list_empty(&cmd->queue))
 201                        continue;
 202
 203                list_del(&cmd->list);
 204
 205                if (cmd->scp) {
 206                        scp = cmd->scp;
 207                        scp->result = (DID_IMM_RETRY << 16);
 208                        scp->scsi_done(scp);
 209                } else {
 210                        cmd->cmd_aborted = true;
 211
 212                        if (cmd->cmd_tmf) {
 213                                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 214                                cfg->tmf_active = false;
 215                                wake_up_all_locked(&cfg->tmf_waitq);
 216                                spin_unlock_irqrestore(&cfg->tmf_slock,
 217                                                       lock_flags);
 218                        } else
 219                                complete(&cmd->cevent);
 220                }
 221        }
 222}
 223
 224/**
 225 * context_reset() - reset context via specified register
 226 * @hwq:        Hardware queue owning the context to be reset.
 227 * @reset_reg:  MMIO register to perform reset.
 228 *
 229 * When the reset is successful, the SISLite specification guarantees that
 230 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 231 * must be flushed.
 232 *
 233 * Return: 0 on success, -errno on failure
 234 */
 235static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
 236{
 237        struct cxlflash_cfg *cfg = hwq->afu->parent;
 238        struct device *dev = &cfg->dev->dev;
 239        int rc = -ETIMEDOUT;
 240        int nretry = 0;
 241        u64 val = 0x1;
 242        ulong lock_flags;
 243
 244        dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
 245
 246        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 247
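            /* Request the context reset by writing 0x1; the AFU clears
             * this bit once the reset has completed. */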
 248        writeq_be(val, reset_reg);
 249        do {
 250                val = readq_be(reset_reg);
 251                if ((val & 0x1) == 0x0) {
 252                        rc = 0;
 253                        break;
 254                }
 255
 256                /* Double the delay with each retry (exponential backoff) */
 257                udelay(1 << nretry);
 258        } while (nretry++ < MC_ROOM_RETRY_CNT);
 259
 260        if (!rc)
 261                flush_pending_cmds(hwq);
 262
 263        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 264
 265        dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
 266                __func__, rc, val, nretry);
 267        return rc;
 268}
 269
 270/**
 271 * context_reset_ioarrin() - reset context via IOARRIN register
 272 * @hwq:        Hardware queue owning the context to be reset.
 273 *
 274 * Return: 0 on success, -errno on failure
 275 */
 276static int context_reset_ioarrin(struct hwq *hwq)
 277{
 278        return context_reset(hwq, &hwq->host_map->ioarrin);
 279}
 280
 281/**
 282 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 283 * @hwq:        Hardware queue owning the context to be reset.
 284 *
 285 * Return: 0 on success, -errno on failure
 286 */
 287static int context_reset_sq(struct hwq *hwq)
 288{
 289        return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
 290}
 291
 292/**
 293 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 294 * @afu:        AFU associated with the host.
 295 * @cmd:        AFU command to send.
 296 *
 297 * Return:
 298 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 299 */
 300static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
 301{
 302        struct cxlflash_cfg *cfg = afu->parent;
 303        struct device *dev = &cfg->dev->dev;
 304        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
 305        int rc = 0;
 306        s64 room;
 307        ulong lock_flags;
 308
 309        /*
 310         * To avoid the performance penalty of MMIO, spread the update of
 311         * 'room' over multiple commands.
 312         */
 313        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 314        if (--hwq->room < 0) {
 315                room = readq_be(&hwq->host_map->cmd_room);
 316                if (room <= 0) {
 317                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
 318                                            "0x%02X, room=0x%016llX\n",
 319                                            __func__, cmd->rcb.cdb[0], room);
 320                        hwq->room = 0;
 321                        rc = SCSI_MLQUEUE_HOST_BUSY;
 322                        goto out;
 323                }
 324                hwq->room = room - 1;
 325        }
 326
 327        list_add(&cmd->list, &hwq->pending_cmds);
 328        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
 329out:
 330        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 331        dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
 332                __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
 333        return rc;
 334}
 335
 336/**
 337 * send_cmd_sq() - sends an AFU command via SQ ring
 338 * @afu:        AFU associated with the host.
 339 * @cmd:        AFU command to send.
 340 *
 341 * Return:
 342 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 343 */
 344static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
 345{
 346        struct cxlflash_cfg *cfg = afu->parent;
 347        struct device *dev = &cfg->dev->dev;
 348        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
 349        int rc = 0;
 350        int newval;
 351        ulong lock_flags;
 352
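            /* Take an SQ credit; atomic_dec_if_positive() returns the
             * decremented value, and anything below 1 is treated here as
             * a full send queue. */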
 353        newval = atomic_dec_if_positive(&hwq->hsq_credits);
 354        if (newval <= 0) {
 355                rc = SCSI_MLQUEUE_HOST_BUSY;
 356                goto out;
 357        }
 358
 359        cmd->rcb.ioasa = &cmd->sa;
 360
 361        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 362
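            /* Copy the RCB into the current SQ slot, then advance the
             * current pointer, wrapping back to the start of the ring
             * when the end is reached. */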
 363        *hwq->hsq_curr = cmd->rcb;
 364        if (hwq->hsq_curr < hwq->hsq_end)
 365                hwq->hsq_curr++;
 366        else
 367                hwq->hsq_curr = hwq->hsq_start;
 368
 369        list_add(&cmd->list, &hwq->pending_cmds);
 370        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
 371
 372        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 373out:
 374        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
 375               "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
 376               cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
 377               readq_be(&hwq->host_map->sq_head),
 378               readq_be(&hwq->host_map->sq_tail));
 379        return rc;
 380}
 381
 382/**
 383 * wait_resp() - polls for a response or timeout to a sent AFU command
 384 * @afu:        AFU associated with the host.
 385 * @cmd:        AFU command that was sent.
 386 *
 387 * Return: 0 on success, -errno on failure
 388 */
 389static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 390{
 391        struct cxlflash_cfg *cfg = afu->parent;
 392        struct device *dev = &cfg->dev->dev;
 393        int rc = 0;
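            /* rcb.timeout is in units of seconds; allow the command up
             * to twice its stated timeout before giving up. */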
 394        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
 395
 396        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
 397        if (!timeout)
 398                rc = -ETIMEDOUT;
 399
 400        if (cmd->cmd_aborted)
 401                rc = -EAGAIN;
 402
 403        if (unlikely(cmd->sa.ioasc != 0)) {
 404                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
 405                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
 406                rc = -EIO;
 407        }
 408
 409        return rc;
 410}
 411
 412/**
 413 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 414 * @host:       SCSI host associated with device.
 415 * @scp:        SCSI command to send.
 416 * @afu:        AFU associated with the host.
 417 *
 418 * Hashes a command based upon the hardware queue mode.
 419 *
 420 * Return: Trusted index of target hardware queue
 421 */
 422static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
 423                             struct afu *afu)
 424{
 425        u32 tag;
 426        u32 hwq = 0;
 427
 428        if (afu->num_hwqs == 1)
 429                return 0;
 430
 431        switch (afu->hwq_mode) {
 432        case HWQ_MODE_RR:
 433                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
 434                break;
 435        case HWQ_MODE_TAG:
 436                tag = blk_mq_unique_tag(scp->request);
 437                hwq = blk_mq_unique_tag_to_hwq(tag);
 438                break;
 439        case HWQ_MODE_CPU:
 440                hwq = smp_processor_id() % afu->num_hwqs;
 441                break;
 442        default:
 443                WARN_ON_ONCE(1);
 444        }
 445
 446        return hwq;
 447}
 448
 449/**
 450 * send_tmf() - sends a Task Management Function (TMF)
 451 * @cfg:        Internal structure associated with the host.
 452 * @sdev:       SCSI device destined for TMF.
 453 * @tmfcmd:     TMF command to send.
 454 *
 455 * Return:
 456 *      0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 457 */
 458static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 459                    u64 tmfcmd)
 460{
 461        struct afu *afu = cfg->afu;
 462        struct afu_cmd *cmd = NULL;
 463        struct device *dev = &cfg->dev->dev;
 464        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
 465        bool needs_deletion = false;
 466        char *buf = NULL;
 467        ulong lock_flags;
 468        int rc = 0;
 469        ulong to;
 470
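            /* Allocate slack so the command can be placed on an
             * __alignof__(*cmd) boundary via PTR_ALIGN() below. */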
 471        buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
 472        if (unlikely(!buf)) {
 473                dev_err(dev, "%s: no memory for command\n", __func__);
 474                rc = -ENOMEM;
 475                goto out;
 476        }
 477
 478        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
 479        INIT_LIST_HEAD(&cmd->queue);
 480
 481        /* When a Task Management Function is active, do not send another */
 482        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 483        if (cfg->tmf_active)
 484                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
 485                                                  !cfg->tmf_active,
 486                                                  cfg->tmf_slock);
 487        cfg->tmf_active = true;
 488        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 489
 490        cmd->parent = afu;
 491        cmd->cmd_tmf = true;
 492        cmd->hwq_index = hwq->index;
 493
 494        cmd->rcb.ctx_id = hwq->ctx_hndl;
 495        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
 496        cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
 497        cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
 498        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
 499                              SISL_REQ_FLAGS_SUP_UNDERRUN |
 500                              SISL_REQ_FLAGS_TMF_CMD);
 501        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
 502
 503        rc = afu->send_cmd(afu, cmd);
 504        if (unlikely(rc)) {
 505                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 506                cfg->tmf_active = false;
 507                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 508                goto out;
 509        }
 510
 511        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
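            /* Allow the TMF up to 5 seconds to complete */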
 512        to = msecs_to_jiffies(5000);
 513        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
 514                                                       !cfg->tmf_active,
 515                                                       cfg->tmf_slock,
 516                                                       to);
 517        if (!to) {
 518                dev_err(dev, "%s: TMF timed out\n", __func__);
 519                rc = -ETIMEDOUT;
 520                needs_deletion = true;
 521        } else if (cmd->cmd_aborted) {
 522                dev_err(dev, "%s: TMF aborted\n", __func__);
 523                rc = -EAGAIN;
 524        } else if (cmd->sa.ioasc) {
 525                dev_err(dev, "%s: TMF failed ioasc=%08x\n",
 526                        __func__, cmd->sa.ioasc);
 527                rc = -EIO;
 528        }
 529        cfg->tmf_active = false;
 530        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 531
 532        if (needs_deletion) {
 533                spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 534                list_del(&cmd->list);
 535                spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 536        }
 537out:
 538        kfree(buf);
 539        return rc;
 540}
 541
 542/**
 543 * cxlflash_driver_info() - information handler for this host driver
 544 * @host:       SCSI host associated with device.
 545 *
 546 * Return: A string describing the device.
 547 */
 548static const char *cxlflash_driver_info(struct Scsi_Host *host)
 549{
 550        return CXLFLASH_ADAPTER_NAME;
 551}
 552
 553/**
 554 * cxlflash_queuecommand() - sends a mid-layer request
 555 * @host:       SCSI host associated with device.
 556 * @scp:        SCSI command to send.
 557 *
 558 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 559 */
 560static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 561{
 562        struct cxlflash_cfg *cfg = shost_priv(host);
 563        struct afu *afu = cfg->afu;
 564        struct device *dev = &cfg->dev->dev;
 565        struct afu_cmd *cmd = sc_to_afuci(scp);
 566        struct scatterlist *sg = scsi_sglist(scp);
 567        int hwq_index = cmd_to_target_hwq(host, scp, afu);
 568        struct hwq *hwq = get_hwq(afu, hwq_index);
 569        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
 570        ulong lock_flags;
 571        int rc = 0;
 572
 573        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
 574                            "cdb=(%08x-%08x-%08x-%08x)\n",
 575                            __func__, scp, host->host_no, scp->device->channel,
 576                            scp->device->id, scp->device->lun,
 577                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
 578                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
 579                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
 580                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
 581
 582        /*
 583         * If a Task Management Function is active, wait for it to complete
 584         * before continuing with regular commands.
 585         */
 586        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 587        if (cfg->tmf_active) {
 588                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 589                rc = SCSI_MLQUEUE_HOST_BUSY;
 590                goto out;
 591        }
 592        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 593
 594        switch (cfg->state) {
 595        case STATE_PROBING:
 596        case STATE_PROBED:
 597        case STATE_RESET:
 598                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
 599                rc = SCSI_MLQUEUE_HOST_BUSY;
 600                goto out;
 601        case STATE_FAILTERM:
 602                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
 603                scp->result = (DID_NO_CONNECT << 16);
 604                scp->scsi_done(scp);
 605                rc = 0;
 606                goto out;
 607        default:
 608                atomic_inc(&afu->cmds_active);
 609                break;
 610        }
 611
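            /* This driver advertises sg_tablesize = 1 in its host
             * template, so at most one scatter-gather element is
             * expected here. */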
 612        if (likely(sg)) {
 613                cmd->rcb.data_len = sg->length;
 614                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
 615        }
 616
 617        cmd->scp = scp;
 618        cmd->parent = afu;
 619        cmd->hwq_index = hwq_index;
 620
 621        cmd->sa.ioasc = 0;
 622        cmd->rcb.ctx_id = hwq->ctx_hndl;
 623        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
 624        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
 625        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
 626
 627        if (scp->sc_data_direction == DMA_TO_DEVICE)
 628                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
 629
 630        cmd->rcb.req_flags = req_flags;
 631        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
 632
 633        rc = afu->send_cmd(afu, cmd);
 634        atomic_dec(&afu->cmds_active);
 635out:
 636        return rc;
 637}
 638
 639/**
 640 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 641 * @cfg:        Internal structure associated with the host.
 642 */
 643static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
 644{
 645        struct pci_dev *pdev = cfg->dev;
 646
 647        if (pci_channel_offline(pdev))
 648                wait_event_timeout(cfg->reset_waitq,
 649                                   !pci_channel_offline(pdev),
 650                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
 651}
 652
 653/**
 654 * free_mem() - free memory associated with the AFU
 655 * @cfg:        Internal structure associated with the host.
 656 */
 657static void free_mem(struct cxlflash_cfg *cfg)
 658{
 659        struct afu *afu = cfg->afu;
 660
 661        if (cfg->afu) {
 662                free_pages((ulong)afu, get_order(sizeof(struct afu)));
 663                cfg->afu = NULL;
 664        }
 665}
 666
 667/**
 668 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 669 * @cfg:        Internal structure associated with the host.
 670 */
 671static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
 672{
 673        if (cfg->async_reset_cookie == 0)
 674                return;
 675
 676        /* Wait until all async calls prior to this cookie have completed */
 677        async_synchronize_cookie(cfg->async_reset_cookie + 1);
 678        cfg->async_reset_cookie = 0;
 679}
 680
 681/**
 682 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 683 * @cfg:        Internal structure associated with the host.
 684 *
 685 * Safe to call with AFU in a partially allocated/initialized state.
 686 *
 687 * Cancels scheduled worker threads, waits for any active internal AFU
 688 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 689 */
 690static void stop_afu(struct cxlflash_cfg *cfg)
 691{
 692        struct afu *afu = cfg->afu;
 693        struct hwq *hwq;
 694        int i;
 695
 696        cancel_work_sync(&cfg->work_q);
 697        if (!current_is_async())
 698                cxlflash_reset_sync(cfg);
 699
 700        if (likely(afu)) {
 701                while (atomic_read(&afu->cmds_active))
 702                        ssleep(1);
 703
 704                if (afu_is_irqpoll_enabled(afu)) {
 705                        for (i = 0; i < afu->num_hwqs; i++) {
 706                                hwq = get_hwq(afu, i);
 707
 708                                irq_poll_disable(&hwq->irqpoll);
 709                        }
 710                }
 711
 712                if (likely(afu->afu_map)) {
 713                        cfg->ops->psa_unmap(afu->afu_map);
 714                        afu->afu_map = NULL;
 715                }
 716        }
 717}
 718
 719/**
 720 * term_intr() - disables all AFU interrupts
 721 * @cfg:        Internal structure associated with the host.
 722 * @level:      Depth of allocation, where to begin waterfall tear down.
 723 * @index:      Index of the hardware queue.
 724 *
 725 * Safe to call with AFU/MC in partially allocated/initialized state.
 726 */
 727static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
 728                      u32 index)
 729{
 730        struct afu *afu = cfg->afu;
 731        struct device *dev = &cfg->dev->dev;
 732        struct hwq *hwq;
 733
 734        if (!afu) {
 735                dev_err(dev, "%s: returning with NULL afu\n", __func__);
 736                return;
 737        }
 738
 739        hwq = get_hwq(afu, index);
 740
 741        if (!hwq->ctx_cookie) {
 742                dev_err(dev, "%s: returning with NULL MC\n", __func__);
 743                return;
 744        }
 745
 746        switch (level) {
 747        case UNMAP_THREE:
 748                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
 749                if (index == PRIMARY_HWQ)
 750                        cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
 751                fallthrough;
 752        case UNMAP_TWO:
 753                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
 754                fallthrough;
 755        case UNMAP_ONE:
 756                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
 757                fallthrough;
 758        case FREE_IRQ:
 759                cfg->ops->free_afu_irqs(hwq->ctx_cookie);
 760                fallthrough;
 761        case UNDO_NOOP:
 762                /* No action required */
 763                break;
 764        }
 765}
 766
 767/**
 768 * term_mc() - terminates the master context
 769 * @cfg:        Internal structure associated with the host.
 770 * @index:      Index of the hardware queue.
 771 *
 772 * Safe to call with AFU/MC in partially allocated/initialized state.
 773 */
 774static void term_mc(struct cxlflash_cfg *cfg, u32 index)
 775{
 776        struct afu *afu = cfg->afu;
 777        struct device *dev = &cfg->dev->dev;
 778        struct hwq *hwq;
 779        ulong lock_flags;
 780
 781        if (!afu) {
 782                dev_err(dev, "%s: returning with NULL afu\n", __func__);
 783                return;
 784        }
 785
 786        hwq = get_hwq(afu, index);
 787
 788        if (!hwq->ctx_cookie) {
 789                dev_err(dev, "%s: returning with NULL MC\n", __func__);
 790                return;
 791        }
 792
 793        WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
 794        if (index != PRIMARY_HWQ)
 795                WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
 796        hwq->ctx_cookie = NULL;
 797
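            /* Take the RRQ offline so that late interrupts are dropped,
             * then flush any commands still pending on this queue. */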
 798        spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
 799        hwq->hrrq_online = false;
 800        spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
 801
 802        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 803        flush_pending_cmds(hwq);
 804        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
 805}
 806
 807/**
 808 * term_afu() - terminates the AFU
 809 * @cfg:        Internal structure associated with the host.
 810 *
 811 * Safe to call with AFU/MC in partially allocated/initialized state.
 812 */
 813static void term_afu(struct cxlflash_cfg *cfg)
 814{
 815        struct device *dev = &cfg->dev->dev;
 816        int k;
 817
 818        /*
 819         * Tear down is carefully orchestrated to ensure
 820         * no interrupts can come in when the problem state
 821         * area is unmapped.
 822         *
 823         * 1) Disable all AFU interrupts for each master
 824         * 2) Unmap the problem state area
 825         * 3) Stop each master context
 826         */
 827        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 828                term_intr(cfg, UNMAP_THREE, k);
 829
 830        stop_afu(cfg);
 831
 832        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
 833                term_mc(cfg, k);
 834
 835        dev_dbg(dev, "%s: returning\n", __func__);
 836}
 837
 838/**
 839 * notify_shutdown() - notifies device of pending shutdown
 840 * @cfg:        Internal structure associated with the host.
 841 * @wait:       Whether to wait for shutdown processing to complete.
 842 *
 843 * This function will notify the AFU that the adapter is being shut down
 844 * and will wait for shutdown processing to complete if wait is true.
 845 * This notification should flush pending I/Os to the device and halt
 846 * further I/Os until the next AFU reset is issued and device restarted.
 847 */
 848static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
 849{
 850        struct afu *afu = cfg->afu;
 851        struct device *dev = &cfg->dev->dev;
 852        struct dev_dependent_vals *ddv;
 853        __be64 __iomem *fc_port_regs;
 854        u64 reg, status;
 855        int i, retry_cnt = 0;
 856
 857        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
 858        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
 859                return;
 860
 861        if (!afu || !afu->afu_map) {
 862                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
 863                return;
 864        }
 865
 866        /* Notify AFU */
 867        for (i = 0; i < cfg->num_fc_ports; i++) {
 868                fc_port_regs = get_fc_port_regs(cfg, i);
 869
 870                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
 871                reg |= SISL_FC_SHUTDOWN_NORMAL;
 872                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
 873        }
 874
 875        if (!wait)
 876                return;
 877
 878        /* Wait up to 1.5 seconds for shutdown processing to complete */
 879        for (i = 0; i < cfg->num_fc_ports; i++) {
 880                fc_port_regs = get_fc_port_regs(cfg, i);
 881                retry_cnt = 0;
 882
 883                while (true) {
 884                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
 885                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
 886                                break;
 887                        if (++retry_cnt >= MC_RETRY_CNT) {
 888                                dev_dbg(dev, "%s: port %d shutdown processing "
 889                                        "not yet completed\n", __func__, i);
 890                                break;
 891                        }
 892                        msleep(100 * retry_cnt);
 893                }
 894        }
 895}
 896
 897/**
 898 * cxlflash_get_minor() - gets the first available minor number
 899 *
 900 * Return: Unique minor number that can be used to create the character device.
 901 */
 902static int cxlflash_get_minor(void)
 903{
 904        int minor;
 905        long bit;
 906
 907        bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
 908        if (bit >= CXLFLASH_MAX_ADAPTERS)
 909                return -1;
 910
 911        minor = bit & MINORMASK;
 912        set_bit(minor, cxlflash_minor);
 913        return minor;
 914}
 915
 916/**
 917 * cxlflash_put_minor() - releases the minor number
 918 * @minor:      Minor number that is no longer needed.
 919 */
 920static void cxlflash_put_minor(int minor)
 921{
 922        clear_bit(minor, cxlflash_minor);
 923}
 924
 925/**
 926 * cxlflash_release_chrdev() - release the character device for the host
 927 * @cfg:        Internal structure associated with the host.
 928 */
 929static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
 930{
 931        device_unregister(cfg->chardev);
 932        cfg->chardev = NULL;
 933        cdev_del(&cfg->cdev);
 934        cxlflash_put_minor(MINOR(cfg->cdev.dev));
 935}
 936
 937/**
 938 * cxlflash_remove() - PCI entry point to tear down host
 939 * @pdev:       PCI device associated with the host.
 940 *
 941 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 942 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 943 */
 944static void cxlflash_remove(struct pci_dev *pdev)
 945{
 946        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
 947        struct device *dev = &pdev->dev;
 948        ulong lock_flags;
 949
 950        if (!pci_is_enabled(pdev)) {
 951                dev_dbg(dev, "%s: Device is disabled\n", __func__);
 952                return;
 953        }
 954
 955        /* Yield to running recovery threads before continuing with remove */
 956        wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
 957                                     cfg->state != STATE_PROBING);
 958        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 959        if (cfg->tmf_active)
 960                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
 961                                                  !cfg->tmf_active,
 962                                                  cfg->tmf_slock);
 963        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 964
 965        /* Notify AFU and wait for shutdown processing to complete */
 966        notify_shutdown(cfg, true);
 967
 968        cfg->state = STATE_FAILTERM;
 969        cxlflash_stop_term_user_contexts(cfg);
 970
 971        switch (cfg->init_state) {
 972        case INIT_STATE_CDEV:
 973                cxlflash_release_chrdev(cfg);
 974                fallthrough;
 975        case INIT_STATE_SCSI:
 976                cxlflash_term_local_luns(cfg);
 977                scsi_remove_host(cfg->host);
 978                fallthrough;
 979        case INIT_STATE_AFU:
 980                term_afu(cfg);
 981                fallthrough;
 982        case INIT_STATE_PCI:
 983                cfg->ops->destroy_afu(cfg->afu_cookie);
 984                pci_disable_device(pdev);
 985                fallthrough;
 986        case INIT_STATE_NONE:
 987                free_mem(cfg);
 988                scsi_host_put(cfg->host);
 989                break;
 990        }
 991
 992        dev_dbg(dev, "%s: returning\n", __func__);
 993}
 994
 995/**
 996 * alloc_mem() - allocates the AFU and its command pool
 997 * @cfg:        Internal structure associated with the host.
 998 *
 999 * A partially allocated state remains on failure.
1000 *
1001 * Return:
1002 *      0 on success
1003 *      -ENOMEM on failure to allocate memory
1004 */
1005static int alloc_mem(struct cxlflash_cfg *cfg)
1006{
1007        int rc = 0;
1008        struct device *dev = &cfg->dev->dev;
1009
1010        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
1011        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1012                                            get_order(sizeof(struct afu)));
1013        if (unlikely(!cfg->afu)) {
1014                dev_err(dev, "%s: cannot get %d free pages\n",
1015                        __func__, get_order(sizeof(struct afu)));
1016                rc = -ENOMEM;
1017                goto out;
1018        }
1019        cfg->afu->parent = cfg;
1020        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
1021        cfg->afu->afu_map = NULL;
1022out:
1023        return rc;
1024}
1025
1026/**
1027 * init_pci() - initializes the host as a PCI device
1028 * @cfg:        Internal structure associated with the host.
1029 *
1030 * Return: 0 on success, -errno on failure
1031 */
1032static int init_pci(struct cxlflash_cfg *cfg)
1033{
1034        struct pci_dev *pdev = cfg->dev;
1035        struct device *dev = &cfg->dev->dev;
1036        int rc = 0;
1037
1038        rc = pci_enable_device(pdev);
1039        if (rc || pci_channel_offline(pdev)) {
1040                if (pci_channel_offline(pdev)) {
1041                        cxlflash_wait_for_pci_err_recovery(cfg);
1042                        rc = pci_enable_device(pdev);
1043                }
1044
1045                if (rc) {
1046                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
1047                        cxlflash_wait_for_pci_err_recovery(cfg);
1048                        goto out;
1049                }
1050        }
1051
1052out:
1053        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1054        return rc;
1055}
1056
1057/**
1058 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1059 * @cfg:        Internal structure associated with the host.
1060 *
1061 * Return: 0 on success, -errno on failure
1062 */
1063static int init_scsi(struct cxlflash_cfg *cfg)
1064{
1065        struct pci_dev *pdev = cfg->dev;
1066        struct device *dev = &cfg->dev->dev;
1067        int rc = 0;
1068
1069        rc = scsi_add_host(cfg->host, &pdev->dev);
1070        if (rc) {
1071                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
1072                goto out;
1073        }
1074
1075        scsi_scan_host(cfg->host);
1076
1077out:
1078        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1079        return rc;
1080}
1081
1082/**
1083 * set_port_online() - transitions the specified host FC port to online state
1084 * @fc_regs:    Top of MMIO region defined for specified port.
1085 *
1086 * The provided MMIO region must be mapped prior to call. Online state means
1087 * that the FC link layer has synced, completed the handshaking process, and
1088 * is ready for login to start.
1089 */
1090static void set_port_online(__be64 __iomem *fc_regs)
1091{
1092        u64 cmdcfg;
1093
1094        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1095        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
1096        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
1097        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1098}
1099
1100/**
1101 * set_port_offline() - transitions the specified host FC port to offline state
1102 * @fc_regs:    Top of MMIO region defined for specified port.
1103 *
1104 * The provided MMIO region must be mapped prior to call.
1105 */
1106static void set_port_offline(__be64 __iomem *fc_regs)
1107{
1108        u64 cmdcfg;
1109
1110        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1111        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
1112        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
1113        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1114}
1115
1116/**
1117 * wait_port_online() - waits for the specified host FC port to come online
1118 * @fc_regs:    Top of MMIO region defined for specified port.
1119 * @delay_us:   Number of microseconds to delay between reading port status.
1120 * @nretry:     Number of cycles to retry reading port status.
1121 *
1122 * The provided MMIO region must be mapped prior to call. This will time out
1123 * when the cable is not plugged in.
1124 *
1125 * Return:
1126 *      TRUE (1) when the specified port is online
1127 *      FALSE (0) when the specified port fails to come online after timeout
1128 */
1129static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1130{
1131        u64 status;
1132
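            /* msleep() below operates in milliseconds; a delay under
             * 1000us would round down to zero. */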
1133        WARN_ON(delay_us < 1000);
1134
1135        do {
1136                msleep(delay_us / 1000);
1137                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
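                    /* An all-ones read likely means the MMIO space has
                     * gone away (e.g. mid-reset); cut the remaining
                     * retries short. */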
1138                if (status == U64_MAX)
1139                        nretry /= 2;
1140        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1141                 nretry--);
1142
1143        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1144}
1145
1146/**
1147 * wait_port_offline() - waits for the specified host FC port to go offline
1148 * @fc_regs:    Top of MMIO region defined for specified port.
1149 * @delay_us:   Number of microseconds to delay between reading port status.
1150 * @nretry:     Number of cycles to retry reading port status.
1151 *
1152 * The provided MMIO region must be mapped prior to call.
1153 *
1154 * Return:
1155 *      TRUE (1) when the specified port is offline
1156 *      FALSE (0) when the specified port fails to go offline after timeout
1157 */
1158static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1159{
1160        u64 status;
1161
1162        WARN_ON(delay_us < 1000);
1163
1164        do {
1165                msleep(delay_us / 1000);
1166                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1167                if (status == U64_MAX)
1168                        nretry /= 2;
1169        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1170                 nretry--);
1171
1172        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1173}
1174
1175/**
1176 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1177 * @afu:        AFU associated with the host that owns the specified FC port.
1178 * @port:       Port number being configured.
1179 * @fc_regs:    Top of MMIO region defined for specified port.
1180 * @wwpn:       The world-wide-port-number previously discovered for port.
1181 *
1182 * The provided MMIO region must be mapped prior to call. As part of the
1183 * sequence to configure the WWPN, the port is toggled offline and then back
1184 * online. This toggling action can cause this routine to delay up to a few
1185 * seconds. When configured to use the internal LUN feature of the AFU, a
1186 * failure to come online is overridden.
1187 */
1188static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1189                         u64 wwpn)
1190{
1191        struct cxlflash_cfg *cfg = afu->parent;
1192        struct device *dev = &cfg->dev->dev;
1193
1194        set_port_offline(fc_regs);
1195        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1196                               FC_PORT_STATUS_RETRY_CNT)) {
1197                dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1198                        __func__, port);
1199        }
1200
1201        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1202
1203        set_port_online(fc_regs);
1204        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1205                              FC_PORT_STATUS_RETRY_CNT)) {
1206                dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1207                        __func__, port);
1208        }
1209}
1210
1211/**
1212 * afu_link_reset() - resets the specified host FC port
1213 * @afu:        AFU associated with the host that owns the specified FC port.
1214 * @port:       Port number being configured.
1215 * @fc_regs:    Top of MMIO region defined for specified port.
1216 *
1217 * The provided MMIO region must be mapped prior to call. The sequence to
1218 * reset the port involves toggling it offline and then back online. This
1219 * action can cause this routine to delay up to a few seconds. An effort
1220 * is made to maintain link with the device by switching the host to
1221 * use the alternate port exclusively while the reset takes place. A
1222 * failure to come back online is logged but otherwise ignored.
1223 */
1224static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1225{
1226        struct cxlflash_cfg *cfg = afu->parent;
1227        struct device *dev = &cfg->dev->dev;
1228        u64 port_sel;
1229
1230        /* first switch the AFU to the other links, if any */
1231        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1232        port_sel &= ~(1ULL << port);
1233        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1234        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1235
1236        set_port_offline(fc_regs);
1237        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1238                               FC_PORT_STATUS_RETRY_CNT))
1239                dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1240                        __func__, port);
1241
1242        set_port_online(fc_regs);
1243        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1244                              FC_PORT_STATUS_RETRY_CNT))
1245                dev_err(dev, "%s: wait on port %d to go online timed out\n",
1246                        __func__, port);
1247
1248        /* switch back to include this port */
1249        port_sel |= (1ULL << port);
1250        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1251        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1252
1253        dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1254}
1255
1256/**
1257 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1258 * @afu:        AFU associated with the host.
1259 */
1260static void afu_err_intr_init(struct afu *afu)
1261{
1262        struct cxlflash_cfg *cfg = afu->parent;
1263        __be64 __iomem *fc_port_regs;
1264        int i;
1265        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1266        u64 reg;
1267
1268        /*
1269         * Global async interrupts: AFU clears afu_ctrl on context exit
1270         * if async interrupts were sent to that context. This prevents
1271         * the AFU from sending further async interrupts when there is
1272         * nobody to receive them.
1273         */
1274
1275        /* mask all */
1276        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1277        /* set LISN# to send and point to primary master context */
1278        reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1279
1280        if (afu->internal_lun)
1281                reg |= 1;       /* Bit 63 indicates local lun */
1282        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1283        /* clear all */
1284        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1285        /* unmask bits that are of interest */
1286        /* note: afu can send an interrupt after this step */
1287        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1288        /* clear again in case a bit came on after previous clear but before */
1289        /* unmask */
1290        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1291
1292        /* Clear/Set internal lun bits */
1293        fc_port_regs = get_fc_port_regs(cfg, 0);
1294        reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1295        reg &= SISL_FC_INTERNAL_MASK;
1296        if (afu->internal_lun)
1297                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1298        writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1299
1300        /* now clear FC errors */
1301        for (i = 0; i < cfg->num_fc_ports; i++) {
1302                fc_port_regs = get_fc_port_regs(cfg, i);
1303
1304                writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
1305                writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1306        }
1307
1308        /* sync interrupts for master's IOARRIN write */
1309        /* note that unlike asyncs, there can be no pending sync interrupts */
1310        /* at this time (this is a fresh context and master has not written */
1311        /* IOARRIN yet), so there is nothing to clear. */
1312
1313        /* set LISN#, it is always sent to the context that wrote IOARRIN */
1314        for (i = 0; i < afu->num_hwqs; i++) {
1315                hwq = get_hwq(afu, i);
1316
1317                reg = readq_be(&hwq->host_map->ctx_ctrl);
1318                WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
1319                reg |= SISL_MSI_SYNC_ERROR;
1320                writeq_be(reg, &hwq->host_map->ctx_ctrl);
1321                writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1322        }
1323}
1324
1325/**
1326 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1327 * @irq:        Interrupt number.
1328 * @data:       Private data provided at interrupt registration, the hardware queue.
1329 *
1330 * Return: Always returns IRQ_HANDLED.
1331 */
1332static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1333{
1334        struct hwq *hwq = (struct hwq *)data;
1335        struct cxlflash_cfg *cfg = hwq->afu->parent;
1336        struct device *dev = &cfg->dev->dev;
1337        u64 reg;
1338        u64 reg_unmasked;
1339
1340        reg = readq_be(&hwq->host_map->intr_status);
1341        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1342
1343        if (reg_unmasked == 0UL) {
1344                dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1345                        __func__, reg);
1346                goto cxlflash_sync_err_irq_exit;
1347        }
1348
1349        dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1350                __func__, reg);
1351
1352        writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1353
1354cxlflash_sync_err_irq_exit:
1355        return IRQ_HANDLED;
1356}
1357
1358/**
1359 * process_hrrq() - process the read-response queue
1360 * @hwq:        Hardware queue whose RRQ is to be processed.
1361 * @doneq:      Queue of commands harvested from the RRQ.
1362 * @budget:     Threshold of RRQ entries to process.
1363 *
1364 * This routine must be called holding the disabled RRQ spin lock.
1365 *
1366 * Return: The number of entries processed.
1367 */
1368static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1369{
1370        struct afu *afu = hwq->afu;
1371        struct afu_cmd *cmd;
1372        struct sisl_ioasa *ioasa;
1373        struct sisl_ioarcb *ioarcb;
1374        bool toggle = hwq->toggle;
1375        int num_hrrq = 0;
1376        u64 entry,
1377            *hrrq_start = hwq->hrrq_start,
1378            *hrrq_end = hwq->hrrq_end,
1379            *hrrq_curr = hwq->hrrq_curr;
1380
1381        /* Process ready RRQ entries up to the specified budget (if any) */
1382        while (true) {
1383                entry = *hrrq_curr;
1384
1385                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1386                        break;
1387
1388                entry &= ~SISL_RESP_HANDLE_T_BIT;
1389
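                    /* In SQ command mode the RRQ entry holds the EA of
                     * the IOASA; otherwise it holds the EA of the IOARCB.
                     * Either way, recover the owning command. */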
1390                if (afu_is_sq_cmd_mode(afu)) {
1391                        ioasa = (struct sisl_ioasa *)entry;
1392                        cmd = container_of(ioasa, struct afu_cmd, sa);
1393                } else {
1394                        ioarcb = (struct sisl_ioarcb *)entry;
1395                        cmd = container_of(ioarcb, struct afu_cmd, rcb);
1396                }
1397
1398                list_add_tail(&cmd->queue, doneq);
1399
1400                /* Advance to next entry or wrap and flip the toggle bit */
1401                if (hrrq_curr < hrrq_end)
1402                        hrrq_curr++;
1403                else {
1404                        hrrq_curr = hrrq_start;
1405                        toggle ^= SISL_RESP_HANDLE_T_BIT;
1406                }
1407
1408                atomic_inc(&hwq->hsq_credits);
1409                num_hrrq++;
1410
1411                if (budget > 0 && num_hrrq >= budget)
1412                        break;
1413        }
1414
1415        hwq->hrrq_curr = hrrq_curr;
1416        hwq->toggle = toggle;
1417
1418        return num_hrrq;
1419}
1420
1421/**
1422 * process_cmd_doneq() - process a queue of harvested RRQ commands
1423 * @doneq:      Queue of completed commands.
1424 *
1425 * Note that upon return the queue can no longer be trusted.
1426 */
1427static void process_cmd_doneq(struct list_head *doneq)
1428{
1429        struct afu_cmd *cmd, *tmp;
1430
1431        WARN_ON(list_empty(doneq));
1432
1433        list_for_each_entry_safe(cmd, tmp, doneq, queue)
1434                cmd_complete(cmd);
1435}
1436
1437/**
1438 * cxlflash_irqpoll() - process the read-response queue via IRQ polling
1439 * @irqpoll:    IRQ poll structure associated with queue to poll.
1440 * @budget:     Threshold of RRQ entries to process per poll.
1441 *
1442 * Return: The number of entries processed.
1443 */
1444static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1445{
1446        struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1447        unsigned long hrrq_flags;
1448        LIST_HEAD(doneq);
1449        int num_entries = 0;
1450
1451        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1452
1453        num_entries = process_hrrq(hwq, &doneq, budget);
1454        if (num_entries < budget)
1455                irq_poll_complete(irqpoll);
1456
1457        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1458
1459        process_cmd_doneq(&doneq);
1460        return num_entries;
1461}
1462
1463/**
1464 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1465 * @irq:        Interrupt number.
1466 * @data:       Private data provided at interrupt registration, the hardware queue.
1467 *
1468 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
1469 */
1470static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1471{
1472        struct hwq *hwq = (struct hwq *)data;
1473        struct afu *afu = hwq->afu;
1474        unsigned long hrrq_flags;
1475        LIST_HEAD(doneq);
1476        int num_entries = 0;
1477
1478        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1479
1480        /* Silently drop spurious interrupts when queue is not online */
1481        if (!hwq->hrrq_online) {
1482                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1483                return IRQ_HANDLED;
1484        }
1485
1486        if (afu_is_irqpoll_enabled(afu)) {
1487                irq_poll_sched(&hwq->irqpoll);
1488                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1489                return IRQ_HANDLED;
1490        }
1491
1492        num_entries = process_hrrq(hwq, &doneq, -1);
1493        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1494
1495        if (num_entries == 0)
1496                return IRQ_NONE;
1497
1498        process_cmd_doneq(&doneq);
1499        return IRQ_HANDLED;
1500}
1501
1502/*
1503 * Asynchronous interrupt information table
1504 *
1505 * NOTE:
1506 *      - Order matters here as this array is indexed by bit position.
1507 *
1508 *      - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1509 *        as complex and complains due to a lack of parentheses/braces.
1510 */
1511#define ASTATUS_FC(_a, _b, _c, _d)                                       \
1512        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1513
1514#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
1515        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
1516        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
1517        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
1518        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
1519        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1520        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
1521        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
1522        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1523
1524static const struct asyc_intr_info ainfo[] = {
1525        BUILD_SISL_ASTATUS_FC_PORT(1),
1526        BUILD_SISL_ASTATUS_FC_PORT(0),
1527        BUILD_SISL_ASTATUS_FC_PORT(3),
1528        BUILD_SISL_ASTATUS_FC_PORT(2)
1529};
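
/*
 * For reference, one ainfo[] element produced by the macros above, e.g.
 * ASTATUS_FC(1, LINK_UP, "link up", 0), expands to
 *
 *      { SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }
 *
 * i.e. { status bit, description, port number, action flags }. Because the
 * table is indexed by bit position, the port blocks must follow the order
 * in which the status bits are defined, which is why the ports are listed
 * as 1, 0, 3, 2 rather than in ascending order.
 */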
1530
1531/**
1532 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1533 * @irq:        Interrupt number.
1534 * @data:       Private data provided at interrupt registration, the AFU.
1535 *
1536 * Return: Always returns IRQ_HANDLED.
1537 */
1538static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1539{
1540        struct hwq *hwq = (struct hwq *)data;
1541        struct afu *afu = hwq->afu;
1542        struct cxlflash_cfg *cfg = afu->parent;
1543        struct device *dev = &cfg->dev->dev;
1544        const struct asyc_intr_info *info;
1545        struct sisl_global_map __iomem *global = &afu->afu_map->global;
1546        __be64 __iomem *fc_port_regs;
1547        u64 reg_unmasked;
1548        u64 reg;
1549        u64 bit;
1550        u8 port;
1551
1552        reg = readq_be(&global->regs.aintr_status);
1553        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1554
1555        if (unlikely(reg_unmasked == 0)) {
1556                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1557                        __func__, reg);
1558                goto out;
1559        }
1560
1561        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1562        writeq_be(reg_unmasked, &global->regs.aintr_clear);
1563
1564        /* Check each bit that is on */
1565        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1566                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1567                        WARN_ON_ONCE(1);
1568                        continue;
1569                }
1570
1571                info = &ainfo[bit];
1572                if (unlikely(info->status != 1ULL << bit)) {
1573                        WARN_ON_ONCE(1);
1574                        continue;
1575                }
1576
1577                port = info->port;
1578                fc_port_regs = get_fc_port_regs(cfg, port);
1579
1580                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1581                        __func__, port, info->desc,
1582                       readq_be(&fc_port_regs[FC_STATUS / 8]));
1583
1584                /*
1585                 * Do link reset first, some OTHER errors will set FC_ERROR
1586                 * again if cleared before or w/o a reset
1587                 */
1588                if (info->action & LINK_RESET) {
1589                        dev_err(dev, "%s: FC Port %d: resetting link\n",
1590                                __func__, port);
1591                        cfg->lr_state = LINK_RESET_REQUIRED;
1592                        cfg->lr_port = port;
1593                        schedule_work(&cfg->work_q);
1594                }
1595
1596                if (info->action & CLR_FC_ERROR) {
1597                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1598
1599                        /*
1600                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1601                         * should be the same and tracing one is sufficient.
1602                         */
1603
1604                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1605                                __func__, port, reg);
1606
1607                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1608                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1609                }
1610
1611                if (info->action & SCAN_HOST) {
1612                        atomic_inc(&cfg->scan_host_needed);
1613                        schedule_work(&cfg->work_q);
1614                }
1615        }
1616
1617out:
1618        return IRQ_HANDLED;
1619}
1620
1621/**
1622 * read_vpd() - obtains the WWPNs from VPD
1623 * @cfg:        Internal structure associated with the host.
1624 * @wwpn:       Array of size MAX_FC_PORTS to pass back WWPNs
1625 *
1626 * Return: 0 on success, -errno on failure
1627 */
1628static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1629{
1630        struct device *dev = &cfg->dev->dev;
1631        struct pci_dev *pdev = cfg->dev;
1632        int rc = 0;
1633        int ro_start, ro_size, i, j, k;
1634        ssize_t vpd_size;
1635        char vpd_data[CXLFLASH_VPD_LEN];
1636        char tmp_buf[WWPN_BUF_LEN] = { 0 };
1637        const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1638                                                cfg->dev_id->driver_data;
1639        const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1640        const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1641
1642        /* Get the VPD data from the device */
1643        vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1644        if (unlikely(vpd_size <= 0)) {
1645                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1646                        __func__, vpd_size);
1647                rc = -ENODEV;
1648                goto out;
1649        }
1650
1651        /* Get the read only section offset */
1652        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1653                                    PCI_VPD_LRDT_RO_DATA);
1654        if (unlikely(ro_start < 0)) {
1655                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1656                rc = -ENODEV;
1657                goto out;
1658        }
1659
1660        /* Get the read only section size, cap when extends beyond read VPD */
1661        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1662        j = ro_size;
1663        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1664        if (unlikely((i + j) > vpd_size)) {
1665                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1666                        __func__, (i + j), vpd_size);
1667                ro_size = vpd_size - i;
1668        }
1669
1670        /*
1671         * Find the offset of the WWPN tag within the read only
1672         * VPD data and validate the found field (partials are
1673         * no good to us). Convert the ASCII data to an integer
1674         * value. Note that we must copy to a temporary buffer
1675         * because the conversion service requires that the ASCII
1676         * string be terminated.
1677         *
1678         * Allow for WWPN not being found for all devices, setting
1679         * the returned WWPN to zero when not found. Notify with a
1680         * log error for cards that should have had WWPN keywords
1681         * in the VPD - cards requiring WWPN will not have their
1682         * ports programmed and operate in an undefined state.
1683         */
1684        for (k = 0; k < cfg->num_fc_ports; k++) {
1685                j = ro_size;
1686                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1687
1688                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1689                if (i < 0) {
1690                        if (wwpn_vpd_required)
1691                                dev_err(dev, "%s: Port %d WWPN not found\n",
1692                                        __func__, k);
1693                        wwpn[k] = 0ULL;
1694                        continue;
1695                }
1696
1697                j = pci_vpd_info_field_size(&vpd_data[i]);
1698                i += PCI_VPD_INFO_FLD_HDR_SIZE;
1699                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1700                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1701                                __func__, k);
1702                        rc = -ENODEV;
1703                        goto out;
1704                }
1705
1706                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1707                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1708                if (unlikely(rc)) {
1709                        dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1710                                __func__, k);
1711                        rc = -ENODEV;
1712                        goto out;
1713                }
1714
1715                dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1716        }
1717
1718out:
1719        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1720        return rc;
1721}
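
/*
 * Illustrative sketch, not part of the driver: each WWPN keyword holds 16
 * ASCII hex characters. Note that read_vpd() above passes WWPN_LEN (16) as
 * the base argument to kstrtoul(), which happens to be the correct base for
 * hex, and that a NUL-terminated copy is needed because kstrtoul() operates
 * on C strings. Parsing one WWPN in isolation would look like:
 */
static int __maybe_unused example_parse_wwpn(const char *ascii, u64 *wwpn)
{
        char buf[WWPN_LEN + 1];

        memcpy(buf, ascii, WWPN_LEN);
        buf[WWPN_LEN] = '\0';                    /* kstrtoul() needs a string */
        return kstrtoul(buf, 16, (ulong *)wwpn); /* base 16 */
}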
1722
1723/**
1724 * init_pcr() - initialize the provisioning and control registers
1725 * @cfg:        Internal structure associated with the host.
1726 *
1727 * Also sets up fast access to the mapped registers and initializes AFU
1728 * command fields that never change.
1729 */
1730static void init_pcr(struct cxlflash_cfg *cfg)
1731{
1732        struct afu *afu = cfg->afu;
1733        struct sisl_ctrl_map __iomem *ctrl_map;
1734        struct hwq *hwq;
1735        void *cookie;
1736        int i;
1737
1738        for (i = 0; i < MAX_CONTEXT; i++) {
1739                ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1740                /* Disrupt any clients that could be running */
1741                /* e.g. clients that survived a master restart */
1742                writeq_be(0, &ctrl_map->rht_start);
1743                writeq_be(0, &ctrl_map->rht_cnt_id);
1744                writeq_be(0, &ctrl_map->ctx_cap);
1745        }
1746
1747        /* Copy frequently used fields into hwq */
1748        for (i = 0; i < afu->num_hwqs; i++) {
1749                hwq = get_hwq(afu, i);
1750                cookie = hwq->ctx_cookie;
1751
1752                hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1753                hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1754                hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1755
1756                /* Program the Endian Control for the master context */
1757                writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1758        }
1759}
1760
1761/**
1762 * init_global() - initialize AFU global registers
1763 * @cfg:        Internal structure associated with the host.
1764 *
     * Return: 0 on success, -errno on failure
     */
1765static int init_global(struct cxlflash_cfg *cfg)
1766{
1767        struct afu *afu = cfg->afu;
1768        struct device *dev = &cfg->dev->dev;
1769        struct hwq *hwq;
1770        struct sisl_host_map __iomem *hmap;
1771        __be64 __iomem *fc_port_regs;
1772        u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1773        int i = 0, num_ports = 0;
1774        int rc = 0;
1775        int j;
1776        void *ctx;
1777        u64 reg;
1778
1779        rc = read_vpd(cfg, &wwpn[0]);
1780        if (rc) {
1781                dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1782                goto out;
1783        }
1784
1785        /* Set up RRQ and SQ in HWQ for master issued cmds */
1786        for (i = 0; i < afu->num_hwqs; i++) {
1787                hwq = get_hwq(afu, i);
1788                hmap = hwq->host_map;
1789
1790                writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1791                writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1792                hwq->hrrq_online = true;
1793
1794                if (afu_is_sq_cmd_mode(afu)) {
1795                        writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1796                        writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1797                }
1798        }
1799
1800        /* AFU configuration */
1801        reg = readq_be(&afu->afu_map->global.regs.afu_config);
1802        reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1803        /* enable all auto retry options and control endianness */
1804        /* leave others at default: */
1805        /* CTX_CAP write protected, mbox_r does not clear on read and */
1806        /* checker on if dual afu */
1807        writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1808
1809        /* Global port select: select either port */
1810        if (afu->internal_lun) {
1811                /* Only use port 0 */
1812                writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1813                num_ports = 0;
1814        } else {
1815                writeq_be(PORT_MASK(cfg->num_fc_ports),
1816                          &afu->afu_map->global.regs.afu_port_sel);
1817                num_ports = cfg->num_fc_ports;
1818        }
1819
1820        for (i = 0; i < num_ports; i++) {
1821                fc_port_regs = get_fc_port_regs(cfg, i);
1822
1823                /* Unmask all errors (but they are still masked at AFU) */
1824                writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1825                /* Clear CRC error cnt & set a threshold */
1826                (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1827                writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1828
1829                /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1830                if (wwpn[i] != 0)
1831                        afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1832                /* Programming WWPN back to back causes additional
1833                 * offline/online transitions and a PLOGI
1834                 */
1835                msleep(100);
1836        }
1837
1838        if (afu_is_ocxl_lisn(afu)) {
1839                /* Set up the LISN effective address for each master */
1840                for (i = 0; i < afu->num_hwqs; i++) {
1841                        hwq = get_hwq(afu, i);
1842                        ctx = hwq->ctx_cookie;
1843
1844                        for (j = 0; j < hwq->num_irqs; j++) {
1845                                reg = cfg->ops->get_irq_objhndl(ctx, j);
1846                                writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1847                        }
1848
1849                        reg = hwq->ctx_hndl;
1850                        writeq_be(SISL_LISN_PASID(reg, reg),
1851                                  &hwq->ctrl_map->lisn_pasid[0]);
1852                        writeq_be(SISL_LISN_PASID(0UL, reg),
1853                                  &hwq->ctrl_map->lisn_pasid[1]);
1854                }
1855        }
1856
1857        /* Set up master's own CTX_CAP to allow real mode, host translation */
1858        /* tables, afu cmds and read/write GSCSI cmds. */
1859        /* First, unlock ctx_cap write by reading mbox */
1860        for (i = 0; i < afu->num_hwqs; i++) {
1861                hwq = get_hwq(afu, i);
1862
1863                (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1864                writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1865                        SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1866                        SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1867                        &hwq->ctrl_map->ctx_cap);
1868        }
1869
1870        /*
1871         * Determine write-same unmap support for host by evaluating the unmap
1872         * sector support bit of the context control register associated with
1873         * the primary hardware queue. Note that while this status is reflected
1874         * in a context register, the outcome can be assumed to be host-wide.
1875         */
1876        hwq = get_hwq(afu, PRIMARY_HWQ);
1877        reg = readq_be(&hwq->host_map->ctx_ctrl);
1878        if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1879                cfg->ws_unmap = true;
1880
1881        /* Initialize heartbeat */
1882        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1883out:
1884        return rc;
1885}
1886
1887/**
1888 * start_afu() - initializes and starts the AFU
1889 * @cfg:        Internal structure associated with the host.
1890 *
     * Return: 0 on success, -errno on failure
     */
1891static int start_afu(struct cxlflash_cfg *cfg)
1892{
1893        struct afu *afu = cfg->afu;
1894        struct device *dev = &cfg->dev->dev;
1895        struct hwq *hwq;
1896        int rc = 0;
1897        int i;
1898
1899        init_pcr(cfg);
1900
1901        /* Initialize each HWQ */
1902        for (i = 0; i < afu->num_hwqs; i++) {
1903                hwq = get_hwq(afu, i);
1904
1905                /* After an AFU reset, RRQ entries are stale, clear them */
1906                memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1907
1908                /* Initialize RRQ pointers */
1909                hwq->hrrq_start = &hwq->rrq_entry[0];
1910                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1911                hwq->hrrq_curr = hwq->hrrq_start;
1912                hwq->toggle = 1;        /* zeroed (stale) entries never match */
1913
1914                /* Initialize spin locks */
1915                spin_lock_init(&hwq->hrrq_slock);
1916                spin_lock_init(&hwq->hsq_slock);
1917
1918                /* Initialize SQ */
1919                if (afu_is_sq_cmd_mode(afu)) {
1920                        memset(&hwq->sq, 0, sizeof(hwq->sq));
1921                        hwq->hsq_start = &hwq->sq[0];
1922                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1923                        hwq->hsq_curr = hwq->hsq_start;
1924
1925                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1926                }
1927
1928                /* Initialize IRQ poll */
1929                if (afu_is_irqpoll_enabled(afu))
1930                        irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1931                                      cxlflash_irqpoll);
1932
1933        }
1934
1935        rc = init_global(cfg);
1936
1937        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1938        return rc;
1939}
1940
1941/**
1942 * init_intr() - setup interrupt handlers for the master context
1943 * @cfg:        Internal structure associated with the host.
1944 * @hwq:        Hardware queue to initialize.
1945 *
1946 * Return: UNDO_NOOP on success, the undo level needed to unwind on failure
1947 */
1948static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1949                                 struct hwq *hwq)
1950{
1951        struct device *dev = &cfg->dev->dev;
1952        void *ctx = hwq->ctx_cookie;
1953        int rc = 0;
1954        enum undo_level level = UNDO_NOOP;
1955        bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1956        int num_irqs = hwq->num_irqs;
1957
1958        rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1959        if (unlikely(rc)) {
1960                dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1961                        __func__, rc);
1962                level = UNDO_NOOP;
1963                goto out;
1964        }
1965
1966        rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1967                                   "SISL_MSI_SYNC_ERROR");
1968        if (unlikely(rc <= 0)) {
1969                dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1970                level = FREE_IRQ;
1971                goto out;
1972        }
1973
1974        rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1975                                   "SISL_MSI_RRQ_UPDATED");
1976        if (unlikely(rc <= 0)) {
1977                dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1978                level = UNMAP_ONE;
1979                goto out;
1980        }
1981
1982        /* SISL_MSI_ASYNC_ERROR is set up only for the primary HWQ */
1983        if (!is_primary_hwq)
1984                goto out;
1985
1986        rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1987                                   "SISL_MSI_ASYNC_ERROR");
1988        if (unlikely(rc <= 0)) {
1989                dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1990                level = UNMAP_TWO;
1991                goto out;
1992        }
1993out:
1994        return level;
1995}
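
/*
 * Illustrative sketch, not part of the driver: init_intr() reports how far
 * setup progressed via an undo_level so the caller can unwind. A teardown
 * routine for such staged setup is typically a switch that falls through
 * from the deepest level reached, along these (assumed) lines:
 */
static void __maybe_unused example_unwind(enum undo_level level)
{
        switch (level) {
        case UNMAP_THREE:
                /* unmap the third interrupt */
                fallthrough;
        case UNMAP_TWO:
                /* unmap the second interrupt */
                fallthrough;
        case UNMAP_ONE:
                /* unmap the first interrupt */
                fallthrough;
        case FREE_IRQ:
                /* free the allocated AFU interrupts */
                break;
        case UNDO_NOOP:
        default:
                break;
        }
}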
1996
1997/**
1998 * init_mc() - create and register as the master context
1999 * @cfg:        Internal structure associated with the host.
2000 * @index:      HWQ Index of the master context.
2001 *
2002 * Return: 0 on success, -errno on failure
2003 */
2004static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2005{
2006        void *ctx;
2007        struct device *dev = &cfg->dev->dev;
2008        struct hwq *hwq = get_hwq(cfg->afu, index);
2009        int rc = 0;
2010        int num_irqs;
2011        enum undo_level level;
2012
2013        hwq->afu = cfg->afu;
2014        hwq->index = index;
2015        INIT_LIST_HEAD(&hwq->pending_cmds);
2016
2017        if (index == PRIMARY_HWQ) {
2018                ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2019                num_irqs = 3;
2020        } else {
2021                ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2022                num_irqs = 2;
2023        }
2024        if (IS_ERR_OR_NULL(ctx)) {
2025                rc = -ENOMEM;
2026                goto err1;
2027        }
2028
2029        WARN_ON(hwq->ctx_cookie);
2030        hwq->ctx_cookie = ctx;
2031        hwq->num_irqs = num_irqs;
2032
2033        /* Set it up as a master with the CXL */
2034        cfg->ops->set_master(ctx);
2035
2036        /* Reset AFU when initializing primary context */
2037        if (index == PRIMARY_HWQ) {
2038                rc = cfg->ops->afu_reset(ctx);
2039                if (unlikely(rc)) {
2040                        dev_err(dev, "%s: AFU reset failed rc=%d\n",
2041                                      __func__, rc);
2042                        goto err1;
2043                }
2044        }
2045
2046        level = init_intr(cfg, hwq);
2047        if (unlikely(level)) {
2048                dev_err(dev, "%s: interrupt init failed level=%d\n",
                            __func__, level);
                    rc = -ENODEV;
2049                goto err2;
2050        }
2051
2052        /* Finally, activate the context by starting it */
2053        rc = cfg->ops->start_context(hwq->ctx_cookie);
2054        if (unlikely(rc)) {
2055                dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2056                level = UNMAP_THREE;
2057                goto err2;
2058        }
2059
2060out:
2061        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2062        return rc;
2063err2:
2064        term_intr(cfg, level, index);
2065        if (index != PRIMARY_HWQ)
2066                cfg->ops->release_context(ctx);
2067err1:
2068        hwq->ctx_cookie = NULL;
2069        goto out;
2070}
2071
2072/**
2073 * get_num_afu_ports() - determines and configures the number of AFU ports
2074 * @cfg:        Internal structure associated with the host.
2075 *
2076 * This routine determines the number of AFU ports by converting the global
2077 * port selection mask. The converted value is only valid following an AFU
2078 * reset (explicit or power-on). This routine must be invoked shortly after
2079 * mapping as other routines are dependent on the number of ports during the
2080 * initialization sequence.
2081 *
2082 * To support legacy AFUs that might not have reflected an initial global
2083 * port mask (value read is 0), default to the number of ports originally
2084 * supported by the cxlflash driver (2) before hardware with other port
2085 * offerings was introduced.
2086 */
2087static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2088{
2089        struct afu *afu = cfg->afu;
2090        struct device *dev = &cfg->dev->dev;
2091        u64 port_mask;
2092        int num_fc_ports = LEGACY_FC_PORTS;
2093
2094        port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2095        if (port_mask != 0ULL)
2096                num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2097
2098        dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2099                __func__, port_mask, num_fc_ports);
2100
2101        cfg->num_fc_ports = num_fc_ports;
2102        cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2103}
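
/*
 * Worked example of the conversion above: a 4-port AFU reflects a global
 * port selection mask of 0xf, so ilog2(0xf) + 1 = 3 + 1 = 4 ports, while a
 * 2-port AFU reflects 0x3, giving ilog2(0x3) + 1 = 2. A mask of 0 falls
 * back to LEGACY_FC_PORTS.
 */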
2104
2105/**
2106 * init_afu() - setup as master context and start AFU
2107 * @cfg:        Internal structure associated with the host.
2108 *
2109 * This routine is a higher level of control for configuring the
2110 * AFU on probe and reset paths.
2111 *
2112 * Return: 0 on success, -errno on failure
2113 */
2114static int init_afu(struct cxlflash_cfg *cfg)
2115{
2116        u64 reg;
2117        int rc = 0;
2118        struct afu *afu = cfg->afu;
2119        struct device *dev = &cfg->dev->dev;
2120        struct hwq *hwq;
2121        int i;
2122
2123        cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2124
2125        mutex_init(&afu->sync_active);
2126        afu->num_hwqs = afu->desired_hwqs;
2127        for (i = 0; i < afu->num_hwqs; i++) {
2128                rc = init_mc(cfg, i);
2129                if (rc) {
2130                        dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2131                                __func__, rc, i);
2132                        goto err1;
2133                }
2134        }
2135
2136        /* Map the entire MMIO space of the AFU using the first context */
2137        hwq = get_hwq(afu, PRIMARY_HWQ);
2138        afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2139        if (!afu->afu_map) {
2140                dev_err(dev, "%s: psa_map failed\n", __func__);
2141                rc = -ENOMEM;
2142                goto err1;
2143        }
2144
2145        /* No byte reverse on reading afu_version, else the string will be backwards */
2146        reg = readq(&afu->afu_map->global.regs.afu_version);
2147        memcpy(afu->version, &reg, sizeof(reg));
2148        afu->interface_version =
2149            readq_be(&afu->afu_map->global.regs.interface_version);
2150        if ((afu->interface_version + 1) == 0) {
2151                dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2152                        "interface version %016llx\n", afu->version,
2153                       afu->interface_version);
2154                rc = -EINVAL;
2155                goto err1;
2156        }
2157
2158        if (afu_is_sq_cmd_mode(afu)) {
2159                afu->send_cmd = send_cmd_sq;
2160                afu->context_reset = context_reset_sq;
2161        } else {
2162                afu->send_cmd = send_cmd_ioarrin;
2163                afu->context_reset = context_reset_ioarrin;
2164        }
2165
2166        dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2167                afu->version, afu->interface_version);
2168
2169        get_num_afu_ports(cfg);
2170
2171        rc = start_afu(cfg);
2172        if (rc) {
2173                dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2174                goto err1;
2175        }
2176
2177        afu_err_intr_init(cfg->afu);
2178        for (i = 0; i < afu->num_hwqs; i++) {
2179                hwq = get_hwq(afu, i);
2180
2181                hwq->room = readq_be(&hwq->host_map->cmd_room);
2182        }
2183
2184        /* Restore the LUN mappings */
2185        cxlflash_restore_luntable(cfg);
2186out:
2187        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2188        return rc;
2189
2190err1:
2191        for (i = afu->num_hwqs - 1; i >= 0; i--) {
2192                term_intr(cfg, UNMAP_THREE, i);
2193                term_mc(cfg, i);
2194        }
2195        goto out;
2196}
2197
2198/**
2199 * afu_reset() - resets the AFU
2200 * @cfg:        Internal structure associated with the host.
2201 *
2202 * Return: 0 on success, -errno on failure
2203 */
2204static int afu_reset(struct cxlflash_cfg *cfg)
2205{
2206        struct device *dev = &cfg->dev->dev;
2207        int rc = 0;
2208
2209        /* Stop the context before the reset. Since the context is
2210         * no longer available, restart it after the reset is complete.
2211         */
2212        term_afu(cfg);
2213
2214        rc = init_afu(cfg);
2215
2216        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2217        return rc;
2218}
2219
2220/**
2221 * drain_ioctls() - wait until all currently executing ioctls have completed
2222 * @cfg:        Internal structure associated with the host.
2223 *
2224 * Obtain write access to read/write semaphore that wraps ioctl
2225 * handling to 'drain' ioctls currently executing.
2226 */
2227static void drain_ioctls(struct cxlflash_cfg *cfg)
2228{
2229        down_write(&cfg->ioctl_rwsem);
2230        up_write(&cfg->ioctl_rwsem);
2231}
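
/*
 * Illustrative note, not part of the driver: the empty write-lock/unlock
 * pair above is a standard rwsem drain idiom. Each ioctl is assumed to hold
 * ioctl_rwsem for read while it executes, so the write lock is not granted
 * until every in-flight reader has finished, and new readers queue behind
 * the waiting writer. The reader side would look like:
 */
static void __maybe_unused example_ioctl_path(struct cxlflash_cfg *cfg)
{
        down_read(&cfg->ioctl_rwsem);
        /* ... service the ioctl while resets are held off ... */
        up_read(&cfg->ioctl_rwsem);
}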
2232
2233/**
2234 * cxlflash_async_reset_host() - asynchronous host reset handler
2235 * @data:       Private data provided while scheduling reset.
2236 * @cookie:     Cookie that can be used for checkpointing.
2237 */
2238static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2239{
2240        struct cxlflash_cfg *cfg = data;
2241        struct device *dev = &cfg->dev->dev;
2242        int rc = 0;
2243
2244        if (cfg->state != STATE_RESET) {
2245                dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2246                        __func__, cfg->state);
2247                goto out;
2248        }
2249
2250        drain_ioctls(cfg);
2251        cxlflash_mark_contexts_error(cfg);
2252        rc = afu_reset(cfg);
2253        if (rc)
2254                cfg->state = STATE_FAILTERM;
2255        else
2256                cfg->state = STATE_NORMAL;
2257        wake_up_all(&cfg->reset_waitq);
2258
2259out:
2260        scsi_unblock_requests(cfg->host);
2261}
2262
2263/**
2264 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2265 * @cfg:        Internal structure associated with the host.
2266 */
2267static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2268{
2269        struct device *dev = &cfg->dev->dev;
2270
2271        if (cfg->state != STATE_NORMAL) {
2272                dev_dbg(dev, "%s: Not performing reset state=%d\n",
2273                        __func__, cfg->state);
2274                return;
2275        }
2276
2277        cfg->state = STATE_RESET;
2278        scsi_block_requests(cfg->host);
2279        cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2280                                                 cfg);
2281}
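
/*
 * For reference: async_schedule() returns a cookie identifying the queued
 * work. Saving it in async_reset_cookie allows teardown paths to wait for
 * an in-flight reset; since async_synchronize_cookie() waits for all work
 * queued prior to the given cookie, waiting on this particular reset is
 * assumed to look like:
 *
 *      async_synchronize_cookie(cfg->async_reset_cookie + 1);
 */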
2282
2283/**
2284 * send_afu_cmd() - builds and sends an internal AFU command
2285 * @afu:        AFU associated with the host.
2286 * @rcb:        Pre-populated IOARCB describing command to send.
2287 *
2288 * The AFU can only take one internal AFU command at a time. This limitation is
2289 * enforced by using a mutex to provide exclusive access to the AFU during the
2290 * operation. This design point requires calling threads to not be on interrupt
2291 * context due to the possibility of sleeping during concurrent AFU operations.
2292 *
2293 * The command status is optionally passed back to the caller when the caller
2294 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2295 *
2296 * Return:
2297 *      0 on success, -errno on failure
2298 */
2299static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2300{
2301        struct cxlflash_cfg *cfg = afu->parent;
2302        struct device *dev = &cfg->dev->dev;
2303        struct afu_cmd *cmd = NULL;
2304        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2305        ulong lock_flags;
2306        char *buf = NULL;
2307        int rc = 0;
2308        int nretry = 0;
2309
2310        if (cfg->state != STATE_NORMAL) {
2311                dev_dbg(dev, "%s: Command not sent, state=%u\n",
2312                        __func__, cfg->state);
2313                return 0;
2314        }
2315
2316        mutex_lock(&afu->sync_active);
2317        atomic_inc(&afu->cmds_active);
2318        buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2319        if (unlikely(!buf)) {
2320                dev_err(dev, "%s: no memory for command\n", __func__);
2321                rc = -ENOMEM;
2322                goto out;
2323        }
2324
2325        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2326
2327retry:
2328        memset(cmd, 0, sizeof(*cmd));
2329        memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2330        INIT_LIST_HEAD(&cmd->queue);
2331        init_completion(&cmd->cevent);
2332        cmd->parent = afu;
2333        cmd->hwq_index = hwq->index;
2334        cmd->rcb.ctx_id = hwq->ctx_hndl;
2335
2336        dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2337                __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2338
2339        rc = afu->send_cmd(afu, cmd);
2340        if (unlikely(rc)) {
2341                rc = -ENOBUFS;
2342                goto out;
2343        }
2344
2345        rc = wait_resp(afu, cmd);
2346        switch (rc) {
2347        case -ETIMEDOUT:
2348                rc = afu->context_reset(hwq);
2349                if (rc) {
2350                        /* Delete the command from pending_cmds list */
2351                        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2352                        list_del(&cmd->list);
2353                        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2354
2355                        cxlflash_schedule_async_reset(cfg);
2356                        break;
2357                }
2358                fallthrough;    /* to retry */
2359        case -EAGAIN:
2360                if (++nretry < 2)
2361                        goto retry;
2362                fallthrough;    /* to exit */
2363        default:
2364                break;
2365        }
2366
2367        if (rcb->ioasa)
2368                *rcb->ioasa = cmd->sa;
2369out:
2370        atomic_dec(&afu->cmds_active);
2371        mutex_unlock(&afu->sync_active);
2372        kfree(buf);
2373        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2374        return rc;
2375}
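
/*
 * Illustrative sketch, not part of the driver: send_afu_cmd() needs a
 * naturally aligned struct afu_cmd, but kmalloc() only guarantees its own
 * minimum alignment. The routine therefore over-allocates by one alignment
 * unit minus a byte and rounds the pointer up with PTR_ALIGN(). The idiom
 * in isolation:
 */
static struct afu_cmd * __maybe_unused example_alloc_cmd(char **bufp)
{
        char *buf;

        /* The slack guarantees an aligned object fits within buf */
        buf = kmalloc(sizeof(struct afu_cmd) +
                      __alignof__(struct afu_cmd) - 1, GFP_KERNEL);
        if (unlikely(!buf))
                return NULL;

        *bufp = buf;    /* caller must kfree() the original pointer */
        return PTR_ALIGN((struct afu_cmd *)buf, __alignof__(struct afu_cmd));
}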
2376
2377/**
2378 * cxlflash_afu_sync() - builds and sends an AFU sync command
2379 * @afu:        AFU associated with the host.
2380 * @ctx:        Identifies context requesting sync.
2381 * @res:        Identifies resource requesting sync.
2382 * @mode:       Type of sync to issue (lightweight, heavyweight, global).
2383 *
2384 * AFU sync operations are only necessary and allowed when the device is
2385 * operating normally. When not operating normally, sync requests can occur as
2386 * part of cleaning up resources associated with an adapter prior to removal.
2387 * In this scenario, these requests are simply ignored (safe due to the AFU
2388 * going away).
2389 *
2390 * Return:
2391 *      0 on success, -errno on failure
2392 */
2393int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2394{
2395        struct cxlflash_cfg *cfg = afu->parent;
2396        struct device *dev = &cfg->dev->dev;
2397        struct sisl_ioarcb rcb = { 0 };
2398
2399        dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2400                __func__, afu, ctx, res, mode);
2401
2402        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2403        rcb.msi = SISL_MSI_RRQ_UPDATED;
2404        rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2405
2406        rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2407        rcb.cdb[1] = mode;
2408        put_unaligned_be16(ctx, &rcb.cdb[2]);
2409        put_unaligned_be32(res, &rcb.cdb[4]);
2410
2411        return send_afu_cmd(afu, &rcb);
2412}
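
/*
 * For reference, the sync CDB built above is laid out as follows (all
 * multi-byte fields big endian):
 *
 *      cdb[0]          SISL_AFU_CMD_SYNC opcode
 *      cdb[1]          mode (lightweight, heavyweight or global)
 *      cdb[2..3]       context handle  (put_unaligned_be16)
 *      cdb[4..7]       resource handle (put_unaligned_be32)
 */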
2413
2414/**
2415 * cxlflash_eh_abort_handler() - abort a SCSI command
2416 * @scp:        SCSI command to abort.
2417 *
2418 * CXL Flash devices do not support a single command abort. Reset the context
2419 * as per SISLite specification. Flush any pending commands in the hardware
2420 * queue before the reset.
2421 *
2422 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2423 */
2424static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2425{
2426        int rc = FAILED;
2427        struct Scsi_Host *host = scp->device->host;
2428        struct cxlflash_cfg *cfg = shost_priv(host);
2429        struct afu_cmd *cmd = sc_to_afuc(scp);
2430        struct device *dev = &cfg->dev->dev;
2431        struct afu *afu = cfg->afu;
2432        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2433
2434        dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2435                "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2436                scp->device->channel, scp->device->id, scp->device->lun,
2437                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2438                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2439                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2440                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2441
2442        /* When the state is not normal, another reset/reload is in progress.
2443         * Return FAILED so the mid-layer invokes the host reset handler.
2444         */
2445        if (cfg->state != STATE_NORMAL) {
2446                dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2447                        __func__, cfg->state);
2448                goto out;
2449        }
2450
2451        rc = afu->context_reset(hwq);
2452        if (unlikely(rc))
2453                goto out;
2454
2455        rc = SUCCESS;
2456
2457out:
2458        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2459        return rc;
2460}
2461
2462/**
2463 * cxlflash_eh_device_reset_handler() - reset a single LUN
2464 * @scp:        SCSI command to send.
2465 *
2466 * Return:
2467 *      SUCCESS as defined in scsi/scsi.h
2468 *      FAILED as defined in scsi/scsi.h
2469 */
2470static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2471{
2472        int rc = SUCCESS;
2473        struct scsi_device *sdev = scp->device;
2474        struct Scsi_Host *host = sdev->host;
2475        struct cxlflash_cfg *cfg = shost_priv(host);
2476        struct device *dev = &cfg->dev->dev;
2477        int rcr = 0;
2478
2479        dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2480                host->host_no, sdev->channel, sdev->id, sdev->lun);
2481retry:
2482        switch (cfg->state) {
2483        case STATE_NORMAL:
2484                rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2485                if (unlikely(rcr))
2486                        rc = FAILED;
2487                break;
2488        case STATE_RESET:
2489                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2490                goto retry;
2491        default:
2492                rc = FAILED;
2493                break;
2494        }
2495
2496        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2497        return rc;
2498}
2499
2500/**
2501 * cxlflash_eh_host_reset_handler() - reset the host adapter
2502 * @scp:        SCSI command from stack identifying host.
2503 *
2504 * Following a reset, the state is evaluated again in case an EEH occurred
2505 * during the reset. In such a scenario, the host reset will either yield
2506 * until the EEH recovery is complete or return success or failure based
2507 * upon the current device state.
2508 *
2509 * Return:
2510 *      SUCCESS as defined in scsi/scsi.h
2511 *      FAILED as defined in scsi/scsi.h
2512 */
2513static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2514{
2515        int rc = SUCCESS;
2516        int rcr = 0;
2517        struct Scsi_Host *host = scp->device->host;
2518        struct cxlflash_cfg *cfg = shost_priv(host);
2519        struct device *dev = &cfg->dev->dev;
2520
2521        dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2522
2523        switch (cfg->state) {
2524        case STATE_NORMAL:
2525                cfg->state = STATE_RESET;
2526                drain_ioctls(cfg);
2527                cxlflash_mark_contexts_error(cfg);
2528                rcr = afu_reset(cfg);
2529                if (rcr) {
2530                        rc = FAILED;
2531                        cfg->state = STATE_FAILTERM;
2532                } else
2533                        cfg->state = STATE_NORMAL;
2534                wake_up_all(&cfg->reset_waitq);
2535                ssleep(1);
2536                fallthrough;
2537        case STATE_RESET:
2538                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2539                if (cfg->state == STATE_NORMAL)
2540                        break;
2541                fallthrough;
2542        default:
2543                rc = FAILED;
2544                break;
2545        }
2546
2547        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2548        return rc;
2549}
2550
2551/**
2552 * cxlflash_change_queue_depth() - change the queue depth for the device
2553 * @sdev:       SCSI device destined for queue depth change.
2554 * @qdepth:     Requested queue depth value to set.
2555 *
2556 * The requested queue depth is capped to the maximum supported value.
2557 *
2558 * Return: The actual queue depth set.
2559 */
2560static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2561{
2563        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2564                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2565
2566        scsi_change_queue_depth(sdev, qdepth);
2567        return sdev->queue_depth;
2568}
2569
2570/**
2571 * cxlflash_show_port_status() - queries and presents the current port status
2572 * @port:       Desired port for status reporting.
2573 * @cfg:        Internal structure associated with the host.
2574 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2575 *
2576 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2577 */
2578static ssize_t cxlflash_show_port_status(u32 port,
2579                                         struct cxlflash_cfg *cfg,
2580                                         char *buf)
2581{
2582        struct device *dev = &cfg->dev->dev;
2583        char *disp_status;
2584        u64 status;
2585        __be64 __iomem *fc_port_regs;
2586
2587        WARN_ON(port >= MAX_FC_PORTS);
2588
2589        if (port >= cfg->num_fc_ports) {
2590                dev_info(dev, "%s: Port %d not supported on this card.\n",
2591                        __func__, port);
2592                return -EINVAL;
2593        }
2594
2595        fc_port_regs = get_fc_port_regs(cfg, port);
2596        status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2597        status &= FC_MTIP_STATUS_MASK;
2598
2599        if (status == FC_MTIP_STATUS_ONLINE)
2600                disp_status = "online";
2601        else if (status == FC_MTIP_STATUS_OFFLINE)
2602                disp_status = "offline";
2603        else
2604                disp_status = "unknown";
2605
2606        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2607}
2608
2609/**
2610 * port0_show() - queries and presents the current status of port 0
2611 * @dev:        Generic device associated with the host owning the port.
2612 * @attr:       Device attribute representing the port.
2613 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2614 *
2615 * Return: The size of the ASCII string returned in @buf.
2616 */
2617static ssize_t port0_show(struct device *dev,
2618                          struct device_attribute *attr,
2619                          char *buf)
2620{
2621        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2622
2623        return cxlflash_show_port_status(0, cfg, buf);
2624}
2625
2626/**
2627 * port1_show() - queries and presents the current status of port 1
2628 * @dev:        Generic device associated with the host owning the port.
2629 * @attr:       Device attribute representing the port.
2630 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2631 *
2632 * Return: The size of the ASCII string returned in @buf.
2633 */
2634static ssize_t port1_show(struct device *dev,
2635                          struct device_attribute *attr,
2636                          char *buf)
2637{
2638        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2639
2640        return cxlflash_show_port_status(1, cfg, buf);
2641}
2642
2643/**
2644 * port2_show() - queries and presents the current status of port 2
2645 * @dev:        Generic device associated with the host owning the port.
2646 * @attr:       Device attribute representing the port.
2647 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2648 *
2649 * Return: The size of the ASCII string returned in @buf.
2650 */
2651static ssize_t port2_show(struct device *dev,
2652                          struct device_attribute *attr,
2653                          char *buf)
2654{
2655        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2656
2657        return cxlflash_show_port_status(2, cfg, buf);
2658}
2659
2660/**
2661 * port3_show() - queries and presents the current status of port 3
2662 * @dev:        Generic device associated with the host owning the port.
2663 * @attr:       Device attribute representing the port.
2664 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2665 *
2666 * Return: The size of the ASCII string returned in @buf.
2667 */
2668static ssize_t port3_show(struct device *dev,
2669                          struct device_attribute *attr,
2670                          char *buf)
2671{
2672        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2673
2674        return cxlflash_show_port_status(3, cfg, buf);
2675}
2676
2677/**
2678 * lun_mode_show() - presents the current LUN mode of the host
2679 * @dev:        Generic device associated with the host.
2680 * @attr:       Device attribute representing the LUN mode.
2681 * @buf:        Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2682 *
2683 * Return: The size of the ASCII string returned in @buf.
2684 */
2685static ssize_t lun_mode_show(struct device *dev,
2686                             struct device_attribute *attr, char *buf)
2687{
2688        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2689        struct afu *afu = cfg->afu;
2690
2691        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2692}
2693
2694/**
2695 * lun_mode_store() - sets the LUN mode of the host
2696 * @dev:        Generic device associated with the host.
2697 * @attr:       Device attribute representing the LUN mode.
2698 * @buf:        Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2699 * @count:      Length of data residing in @buf.
2700 *
2701 * The CXL Flash AFU supports a dummy LUN mode where the external
2702 * links and storage are not required. Space on the FPGA is used
2703 * to create 1 or 2 small LUNs which are presented to the system
2704 * as if they were a normal storage device. This feature is useful
2705 * during development and also provides manufacturing with a way
2706 * to test the AFU without an actual device.
2707 *
2708 * 0 = external LUN[s] (default)
2709 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2710 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2711 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2712 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2713 *
2714 * Return: The number of bytes consumed from @buf, i.e. @count.
2715 */
2716static ssize_t lun_mode_store(struct device *dev,
2717                              struct device_attribute *attr,
2718                              const char *buf, size_t count)
2719{
2720        struct Scsi_Host *shost = class_to_shost(dev);
2721        struct cxlflash_cfg *cfg = shost_priv(shost);
2722        struct afu *afu = cfg->afu;
2723        int rc;
2724        u32 lun_mode;
2725
2726        rc = kstrtouint(buf, 10, &lun_mode);
2727        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2728                afu->internal_lun = lun_mode;
2729
2730                /*
2731                 * When configured for internal LUN, there is only one channel,
2732                 * channel number 0, else there will be one less than the number
2733                 * of fc ports for this card.
2734                 */
2735                if (afu->internal_lun)
2736                        shost->max_channel = 0;
2737                else
2738                        shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2739
2740                afu_reset(cfg);
2741                scsi_scan_host(cfg->host);
2742        }
2743
2744        return count;
2745}
2746
2747/**
2748 * ioctl_version_show() - presents the current ioctl version of the host
2749 * @dev:        Generic device associated with the host.
2750 * @attr:       Device attribute representing the ioctl version.
2751 * @buf:        Buffer of length PAGE_SIZE to report back the ioctl version.
2752 *
2753 * Return: The size of the ASCII string returned in @buf.
2754 */
2755static ssize_t ioctl_version_show(struct device *dev,
2756                                  struct device_attribute *attr, char *buf)
2757{
2758        ssize_t bytes = 0;
2759
2760        bytes = scnprintf(buf, PAGE_SIZE,
2761                          "disk: %u\n", DK_CXLFLASH_VERSION_0);
2762        bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2763                           "host: %u\n", HT_CXLFLASH_VERSION_0);
2764
2765        return bytes;
2766}
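
/*
 * For reference: multi-line sysfs output such as the above is accumulated
 * with the bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, ...) pattern.
 * Unlike snprintf(), scnprintf() returns the number of characters actually
 * written (excluding the trailing NUL) rather than the would-be length, so
 * the running total can never walk past the PAGE_SIZE sysfs buffer.
 */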
2767
2768/**
2769 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2770 * @port:       Desired port for status reporting.
2771 * @cfg:        Internal structure associated with the host.
2772 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2773 *
2774 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2775 */
2776static ssize_t cxlflash_show_port_lun_table(u32 port,
2777                                            struct cxlflash_cfg *cfg,
2778                                            char *buf)
2779{
2780        struct device *dev = &cfg->dev->dev;
2781        __be64 __iomem *fc_port_luns;
2782        int i;
2783        ssize_t bytes = 0;
2784
2785        WARN_ON(port >= MAX_FC_PORTS);
2786
2787        if (port >= cfg->num_fc_ports) {
2788                dev_info(dev, "%s: Port %d not supported on this card.\n",
2789                        __func__, port);
2790                return -EINVAL;
2791        }
2792
2793        fc_port_luns = get_fc_port_luns(cfg, port);
2794
2795        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2796                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2797                                   "%03d: %016llx\n",
2798                                   i, readq_be(&fc_port_luns[i]));
2799        return bytes;
2800}
2801
2802/**
2803 * port0_lun_table_show() - presents the current LUN table of port 0
2804 * @dev:        Generic device associated with the host owning the port.
2805 * @attr:       Device attribute representing the port.
2806 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2807 *
2808 * Return: The size of the ASCII string returned in @buf.
2809 */
2810static ssize_t port0_lun_table_show(struct device *dev,
2811                                    struct device_attribute *attr,
2812                                    char *buf)
2813{
2814        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2815
2816        return cxlflash_show_port_lun_table(0, cfg, buf);
2817}
2818
2819/**
2820 * port1_lun_table_show() - presents the current LUN table of port 1
2821 * @dev:        Generic device associated with the host owning the port.
2822 * @attr:       Device attribute representing the port.
2823 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2824 *
2825 * Return: The size of the ASCII string returned in @buf.
2826 */
2827static ssize_t port1_lun_table_show(struct device *dev,
2828                                    struct device_attribute *attr,
2829                                    char *buf)
2830{
2831        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2832
2833        return cxlflash_show_port_lun_table(1, cfg, buf);
2834}
2835
2836/**
2837 * port2_lun_table_show() - presents the current LUN table of port 2
2838 * @dev:        Generic device associated with the host owning the port.
2839 * @attr:       Device attribute representing the port.
2840 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2841 *
2842 * Return: The size of the ASCII string returned in @buf.
2843 */
2844static ssize_t port2_lun_table_show(struct device *dev,
2845                                    struct device_attribute *attr,
2846                                    char *buf)
2847{
2848        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2849
2850        return cxlflash_show_port_lun_table(2, cfg, buf);
2851}
2852
2853/**
2854 * port3_lun_table_show() - presents the current LUN table of port 3
2855 * @dev:        Generic device associated with the host owning the port.
2856 * @attr:       Device attribute representing the port.
2857 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2858 *
2859 * Return: The size of the ASCII string returned in @buf.
2860 */
2861static ssize_t port3_lun_table_show(struct device *dev,
2862                                    struct device_attribute *attr,
2863                                    char *buf)
2864{
2865        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2866
2867        return cxlflash_show_port_lun_table(3, cfg, buf);
2868}
2869
2870/**
2871 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2872 * @dev:        Generic device associated with the host.
2873 * @attr:       Device attribute representing the IRQ poll weight.
2874 * @buf:        Buffer of length PAGE_SIZE to report back the current IRQ poll
2875 *              weight in ASCII.
2876 *
2877 * An IRQ poll weight of 0 indicates polling is disabled.
2878 *
2879 * Return: The size of the ASCII string returned in @buf.
2880 */
2881static ssize_t irqpoll_weight_show(struct device *dev,
2882                                   struct device_attribute *attr, char *buf)
2883{
2884        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2885        struct afu *afu = cfg->afu;
2886
2887        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2888}
2889
2890/**
2891 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2892 * @dev:        Generic device associated with the host.
2893 * @attr:       Device attribute representing the IRQ poll weight.
2894 * @buf:        Buffer of length PAGE_SIZE containing the desired IRQ poll
2895 *              weight in ASCII.
2896 * @count:      Length of data residing in @buf.
2897 *
2898 * An IRQ poll weight of 0 indicates polling is disabled.
2899 *
2900 * Return: The number of bytes consumed from @buf, i.e. @count.
2901 */
2902static ssize_t irqpoll_weight_store(struct device *dev,
2903                                    struct device_attribute *attr,
2904                                    const char *buf, size_t count)
2905{
2906        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2907        struct device *cfgdev = &cfg->dev->dev;
2908        struct afu *afu = cfg->afu;
2909        struct hwq *hwq;
2910        u32 weight;
2911        int rc, i;
2912
2913        rc = kstrtouint(buf, 10, &weight);
2914        if (rc)
2915                return -EINVAL;
2916
2917        if (weight > 256) {
2918                dev_info(cfgdev,
2919                         "Invalid IRQ poll weight. It must be 256 or less.\n");
2920                return -EINVAL;
2921        }
2922
2923        if (weight == afu->irqpoll_weight) {
2924                dev_info(cfgdev,
2925                         "IRQ poll weight is already set to that value.\n");
2926                return -EINVAL;
2927        }
2928
2929        if (afu_is_irqpoll_enabled(afu)) {
2930                for (i = 0; i < afu->num_hwqs; i++) {
2931                        hwq = get_hwq(afu, i);
2932
2933                        irq_poll_disable(&hwq->irqpoll);
2934                }
2935        }
2936
2937        afu->irqpoll_weight = weight;
2938
2939        if (weight > 0) {
2940                for (i = 0; i < afu->num_hwqs; i++) {
2941                        hwq = get_hwq(afu, i);
2942
2943                        irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2944                }
2945        }
2946
2947        return count;
2948}
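
/*
 * Example usage (a sketch; "host0" is a hypothetical host instance whose
 * number depends on enumeration order):
 *
 *   echo 0  > /sys/class/scsi_host/host0/irqpoll_weight   # disable polling
 *   echo 64 > /sys/class/scsi_host/host0/irqpoll_weight   # poll up to 64
 *                                                         # entries per cycle
 */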
2949
2950/**
2951 * num_hwqs_show() - presents the number of hardware queues for the host
2952 * @dev:        Generic device associated with the host.
2953 * @attr:       Device attribute representing the number of hardware queues.
2954 * @buf:        Buffer of length PAGE_SIZE to report back the number of hardware
2955 *              queues in ASCII.
2956 *
2957 * Return: The size of the ASCII string returned in @buf.
2958 */
2959static ssize_t num_hwqs_show(struct device *dev,
2960                             struct device_attribute *attr, char *buf)
2961{
2962        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2963        struct afu *afu = cfg->afu;
2964
2965        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2966}
2967
2968/**
2969 * num_hwqs_store() - sets the number of hardware queues for the host
2970 * @dev:        Generic device associated with the host.
2971 * @attr:       Device attribute representing the number of hardware queues.
2972 * @buf:        Buffer of length PAGE_SIZE containing the number of hardware
2973 *              queues in ASCII.
 * @count:      Length of data residing in @buf.
2975 *
2976 * n > 0: num_hwqs = n
2977 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2979 *
 * Return: @count on success, -EINVAL on failure.
2981 */
2982static ssize_t num_hwqs_store(struct device *dev,
2983                              struct device_attribute *attr,
2984                              const char *buf, size_t count)
2985{
2986        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2987        struct afu *afu = cfg->afu;
2988        int rc;
2989        int nhwqs, num_hwqs;
2990
2991        rc = kstrtoint(buf, 10, &nhwqs);
2992        if (rc)
2993                return -EINVAL;
2994
2995        if (nhwqs >= 1)
2996                num_hwqs = nhwqs;
2997        else if (nhwqs == 0)
2998                num_hwqs = num_online_cpus();
2999        else
3000                num_hwqs = num_online_cpus() / abs(nhwqs);
3001
3002        afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3003        WARN_ON_ONCE(afu->desired_hwqs == 0);
3004
3005retry:
3006        switch (cfg->state) {
3007        case STATE_NORMAL:
3008                cfg->state = STATE_RESET;
3009                drain_ioctls(cfg);
3010                cxlflash_mark_contexts_error(cfg);
3011                rc = afu_reset(cfg);
3012                if (rc)
3013                        cfg->state = STATE_FAILTERM;
3014                else
3015                        cfg->state = STATE_NORMAL;
3016                wake_up_all(&cfg->reset_waitq);
3017                break;
3018        case STATE_RESET:
3019                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3020                if (cfg->state == STATE_NORMAL)
3021                        goto retry;
3022                fallthrough;
3023        default:
3024                /* Ideally should not happen */
3025                dev_err(dev, "%s: Device is not ready, state=%d\n",
3026                        __func__, cfg->state);
3027                break;
3028        }
3029
3030        return count;
3031}
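
/*
 * Example usage (a sketch; "host0" is a hypothetical host instance). The
 * written value n maps to a queue count as documented above, clamped to
 * CXLFLASH_MAX_HWQS and applied via an AFU reset:
 *
 *   echo 4  > /sys/class/scsi_host/host0/num_hwqs   # exactly 4 queues
 *   echo 0  > /sys/class/scsi_host/host0/num_hwqs   # one queue per online CPU
 *   echo -2 > /sys/class/scsi_host/host0/num_hwqs   # num_online_cpus() / 2
 */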
3032
3033static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3034
3035/**
3036 * hwq_mode_show() - presents the HWQ steering mode for the host
3037 * @dev:        Generic device associated with the host.
3038 * @attr:       Device attribute representing the HWQ steering mode.
3039 * @buf:        Buffer of length PAGE_SIZE to report back the HWQ steering mode
3040 *              as a character string.
3041 *
3042 * Return: The size of the ASCII string returned in @buf.
3043 */
3044static ssize_t hwq_mode_show(struct device *dev,
3045                             struct device_attribute *attr, char *buf)
3046{
3047        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3048        struct afu *afu = cfg->afu;
3049
3050        return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3051}
3052
3053/**
3054 * hwq_mode_store() - sets the HWQ steering mode for the host
3055 * @dev:        Generic device associated with the host.
3056 * @attr:       Device attribute representing the HWQ steering mode.
3057 * @buf:        Buffer of length PAGE_SIZE containing the HWQ steering mode
3058 *              as a character string.
 * @count:      Length of data residing in @buf.
3060 *
3061 * rr = Round-Robin
3062 * tag = Block MQ Tagging
3063 * cpu = CPU Affinity
3064 *
 * Return: @count on success, -EINVAL on failure.
3066 */
3067static ssize_t hwq_mode_store(struct device *dev,
3068                              struct device_attribute *attr,
3069                              const char *buf, size_t count)
3070{
3071        struct Scsi_Host *shost = class_to_shost(dev);
3072        struct cxlflash_cfg *cfg = shost_priv(shost);
3073        struct device *cfgdev = &cfg->dev->dev;
3074        struct afu *afu = cfg->afu;
3075        int i;
3076        u32 mode = MAX_HWQ_MODE;
3077
3078        for (i = 0; i < MAX_HWQ_MODE; i++) {
3079                if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3080                        mode = i;
3081                        break;
3082                }
3083        }
3084
3085        if (mode >= MAX_HWQ_MODE) {
3086                dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3087                return -EINVAL;
3088        }
3089
3090        afu->hwq_mode = mode;
3091
3092        return count;
3093}
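
/*
 * Example usage (a sketch; "host0" is a hypothetical host instance):
 *
 *   echo rr  > /sys/class/scsi_host/host0/hwq_mode   # round-robin
 *   echo tag > /sys/class/scsi_host/host0/hwq_mode   # block MQ tag steering
 *   echo cpu > /sys/class/scsi_host/host0/hwq_mode   # CPU affinity
 */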
3094
3095/**
3096 * mode_show() - presents the current mode of the device
3097 * @dev:        Generic device associated with the device.
3098 * @attr:       Device attribute representing the device mode.
3099 * @buf:        Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3100 *
3101 * Return: The size of the ASCII string returned in @buf.
3102 */
3103static ssize_t mode_show(struct device *dev,
3104                         struct device_attribute *attr, char *buf)
3105{
3106        struct scsi_device *sdev = to_scsi_device(dev);
3107
3108        return scnprintf(buf, PAGE_SIZE, "%s\n",
3109                         sdev->hostdata ? "superpipe" : "legacy");
3110}
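
/*
 * Example usage (a sketch; the SCSI address "0:0:0:0" is hypothetical and
 * depends on how the device was enumerated):
 *
 *   $ cat /sys/bus/scsi/devices/0:0:0:0/mode
 *   superpipe
 */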
3111
3112/*
3113 * Host attributes
3114 */
3115static DEVICE_ATTR_RO(port0);
3116static DEVICE_ATTR_RO(port1);
3117static DEVICE_ATTR_RO(port2);
3118static DEVICE_ATTR_RO(port3);
3119static DEVICE_ATTR_RW(lun_mode);
3120static DEVICE_ATTR_RO(ioctl_version);
3121static DEVICE_ATTR_RO(port0_lun_table);
3122static DEVICE_ATTR_RO(port1_lun_table);
3123static DEVICE_ATTR_RO(port2_lun_table);
3124static DEVICE_ATTR_RO(port3_lun_table);
3125static DEVICE_ATTR_RW(irqpoll_weight);
3126static DEVICE_ATTR_RW(num_hwqs);
3127static DEVICE_ATTR_RW(hwq_mode);
3128
3129static struct device_attribute *cxlflash_host_attrs[] = {
3130        &dev_attr_port0,
3131        &dev_attr_port1,
3132        &dev_attr_port2,
3133        &dev_attr_port3,
3134        &dev_attr_lun_mode,
3135        &dev_attr_ioctl_version,
3136        &dev_attr_port0_lun_table,
3137        &dev_attr_port1_lun_table,
3138        &dev_attr_port2_lun_table,
3139        &dev_attr_port3_lun_table,
3140        &dev_attr_irqpoll_weight,
3141        &dev_attr_num_hwqs,
3142        &dev_attr_hwq_mode,
3143        NULL
3144};
3145
3146/*
3147 * Device attributes
3148 */
3149static DEVICE_ATTR_RO(mode);
3150
3151static struct device_attribute *cxlflash_dev_attrs[] = {
3152        &dev_attr_mode,
3153        NULL
3154};
3155
3156/*
3157 * Host template
3158 */
3159static struct scsi_host_template driver_template = {
3160        .module = THIS_MODULE,
3161        .name = CXLFLASH_ADAPTER_NAME,
3162        .info = cxlflash_driver_info,
3163        .ioctl = cxlflash_ioctl,
3164        .proc_name = CXLFLASH_NAME,
3165        .queuecommand = cxlflash_queuecommand,
3166        .eh_abort_handler = cxlflash_eh_abort_handler,
3167        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3168        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3169        .change_queue_depth = cxlflash_change_queue_depth,
3170        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3171        .can_queue = CXLFLASH_MAX_CMDS,
3172        .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3173        .this_id = -1,
3174        .sg_tablesize = 1,      /* No scatter gather support */
3175        .max_sectors = CXLFLASH_MAX_SECTORS,
3176        .shost_attrs = cxlflash_host_attrs,
3177        .sdev_attrs = cxlflash_dev_attrs,
3178};
3179
3180/*
3181 * Device dependent values
3182 */
3183static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3184                                        CXLFLASH_WWPN_VPD_REQUIRED };
3185static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3186                                        CXLFLASH_NOTIFY_SHUTDOWN };
3187static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3188                                        (CXLFLASH_NOTIFY_SHUTDOWN |
3189                                        CXLFLASH_OCXL_DEV) };
3190
3191/*
3192 * PCI device binding table
3193 */
3194static struct pci_device_id cxlflash_pci_table[] = {
3195        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3196         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3197        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3198         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3199        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3200         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3201        {}
3202};
3203
3204MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3205
3206/**
3207 * cxlflash_worker_thread() - work thread handler for the AFU
3208 * @work:       Work structure contained within cxlflash associated with host.
3209 *
3210 * Handles the following events:
 * - Link reset, which cannot be performed in interrupt context because it
 *   can block for up to a few seconds
3213 * - Rescan the host
3214 */
3215static void cxlflash_worker_thread(struct work_struct *work)
3216{
3217        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3218                                                work_q);
3219        struct afu *afu = cfg->afu;
3220        struct device *dev = &cfg->dev->dev;
3221        __be64 __iomem *fc_port_regs;
3222        int port;
3223        ulong lock_flags;
3224
3225        /* Avoid MMIO if the device has failed */
3226
3227        if (cfg->state != STATE_NORMAL)
3228                return;
3229
3230        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3231
3232        if (cfg->lr_state == LINK_RESET_REQUIRED) {
3233                port = cfg->lr_port;
3234                if (port < 0)
3235                        dev_err(dev, "%s: invalid port index %d\n",
3236                                __func__, port);
3237                else {
3238                        spin_unlock_irqrestore(cfg->host->host_lock,
3239                                               lock_flags);
3240
3241                        /* The reset can block... */
3242                        fc_port_regs = get_fc_port_regs(cfg, port);
3243                        afu_link_reset(afu, port, fc_port_regs);
3244                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3245                }
3246
3247                cfg->lr_state = LINK_RESET_COMPLETE;
3248        }
3249
3250        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3251
3252        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3253                scsi_scan_host(cfg->host);
3254}
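
/*
 * The worker is kicked with schedule_work() by paths that must not block,
 * e.g. when interrupt handling determines a link reset is needed (a sketch
 * of the pattern, not a verbatim excerpt):
 *
 *   cfg->lr_state = LINK_RESET_REQUIRED;
 *   cfg->lr_port = port;
 *   schedule_work(&cfg->work_q);
 */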
3255
3256/**
3257 * cxlflash_chr_open() - character device open handler
3258 * @inode:      Device inode associated with this character device.
3259 * @file:       File pointer for this device.
3260 *
3261 * Only users with admin privileges are allowed to open the character device.
3262 *
3263 * Return: 0 on success, -errno on failure
3264 */
3265static int cxlflash_chr_open(struct inode *inode, struct file *file)
3266{
3267        struct cxlflash_cfg *cfg;
3268
3269        if (!capable(CAP_SYS_ADMIN))
3270                return -EACCES;
3271
3272        cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3273        file->private_data = cfg;
3274
3275        return 0;
3276}
3277
3278/**
3279 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3280 * @cmd:        The host ioctl command to decode.
3281 *
3282 * Return: A string identifying the decoded host ioctl.
3283 */
3284static char *decode_hioctl(unsigned int cmd)
3285{
3286        switch (cmd) {
3287        case HT_CXLFLASH_LUN_PROVISION:
3288                return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3289        }
3290
3291        return "UNKNOWN";
3292}
3293
3294/**
3295 * cxlflash_lun_provision() - host LUN provisioning handler
3296 * @cfg:        Internal structure associated with the host.
 * @lunprov:    Kernel copy of userspace ioctl data structure.
3298 *
3299 * Return: 0 on success, -errno on failure
3300 */
3301static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3302                                  struct ht_cxlflash_lun_provision *lunprov)
3303{
3304        struct afu *afu = cfg->afu;
3305        struct device *dev = &cfg->dev->dev;
3306        struct sisl_ioarcb rcb;
3307        struct sisl_ioasa asa;
3308        __be64 __iomem *fc_port_regs;
3309        u16 port = lunprov->port;
3310        u16 scmd = lunprov->hdr.subcmd;
3311        u16 type;
3312        u64 reg;
3313        u64 size;
3314        u64 lun_id;
3315        int rc = 0;
3316
3317        if (!afu_is_lun_provision(afu)) {
3318                rc = -ENOTSUPP;
3319                goto out;
3320        }
3321
3322        if (port >= cfg->num_fc_ports) {
3323                rc = -EINVAL;
3324                goto out;
3325        }
3326
3327        switch (scmd) {
3328        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3329                type = SISL_AFU_LUN_PROVISION_CREATE;
3330                size = lunprov->size;
3331                lun_id = 0;
3332                break;
3333        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3334                type = SISL_AFU_LUN_PROVISION_DELETE;
3335                size = 0;
3336                lun_id = lunprov->lun_id;
3337                break;
3338        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3339                fc_port_regs = get_fc_port_regs(cfg, port);
3340
3341                reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3342                lunprov->max_num_luns = reg;
3343                reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3344                lunprov->cur_num_luns = reg;
3345                reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3346                lunprov->max_cap_port = reg;
3347                reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3348                lunprov->cur_cap_port = reg;
3349
3350                goto out;
3351        default:
3352                rc = -EINVAL;
3353                goto out;
3354        }
3355
3356        memset(&rcb, 0, sizeof(rcb));
3357        memset(&asa, 0, sizeof(asa));
3358        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3359        rcb.lun_id = lun_id;
3360        rcb.msi = SISL_MSI_RRQ_UPDATED;
3361        rcb.timeout = MC_LUN_PROV_TIMEOUT;
3362        rcb.ioasa = &asa;
3363
3364        rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3365        rcb.cdb[1] = type;
3366        rcb.cdb[2] = port;
3367        put_unaligned_be64(size, &rcb.cdb[8]);
3368
3369        rc = send_afu_cmd(afu, &rcb);
3370        if (rc) {
3371                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3372                        __func__, rc, asa.ioasc, asa.afu_extra);
3373                goto out;
3374        }
3375
3376        if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3377                lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3378                memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3379        }
3380out:
3381        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3382        return rc;
3383}
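
/*
 * Example userspace invocation (a sketch, not part of the driver; error
 * handling is omitted, the device path is hypothetical and "nblocks" is a
 * caller-supplied size per the uapi header):
 *
 *   struct ht_cxlflash_lun_provision lp = { 0 };
 *   int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *
 *   lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *   lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
 *   lp.port = 0;
 *   lp.size = nblocks;
 *   ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp);
 *
 * On success, lp.lun_id and lp.wwid identify the newly created LUN.
 */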
3384
3385/**
3386 * cxlflash_afu_debug() - host AFU debug handler
3387 * @cfg:        Internal structure associated with the host.
 * @afu_dbg:    Kernel copy of userspace ioctl data structure.
3389 *
3390 * For debug requests requiring a data buffer, always provide an aligned
3391 * (cache line) buffer to the AFU to appease any alignment requirements.
3392 *
3393 * Return: 0 on success, -errno on failure
3394 */
3395static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3396                              struct ht_cxlflash_afu_debug *afu_dbg)
3397{
3398        struct afu *afu = cfg->afu;
3399        struct device *dev = &cfg->dev->dev;
3400        struct sisl_ioarcb rcb;
3401        struct sisl_ioasa asa;
3402        char *buf = NULL;
3403        char *kbuf = NULL;
3404        void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3405        u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3406        u32 ulen = afu_dbg->data_len;
3407        bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3408        int rc = 0;
3409
3410        if (!afu_is_afu_debug(afu)) {
3411                rc = -ENOTSUPP;
3412                goto out;
3413        }
3414
3415        if (ulen) {
3416                req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3417
3418                if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3419                        rc = -EINVAL;
3420                        goto out;
3421                }
3422
3423                buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3424                if (unlikely(!buf)) {
3425                        rc = -ENOMEM;
3426                        goto out;
3427                }
3428
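                /*
                 * The allocation above is padded by cache_line_size() - 1
                 * bytes so that an aligned window of ulen bytes always fits
                 * inside it; kbuf below is that aligned view handed to the
                 * AFU, while buf remains the pointer that is kfree'd.
                 */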
3429                kbuf = PTR_ALIGN(buf, cache_line_size());
3430
3431                if (is_write) {
3432                        req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3433
3434                        if (copy_from_user(kbuf, ubuf, ulen)) {
3435                                rc = -EFAULT;
3436                                goto out;
3437                        }
3438                }
3439        }
3440
3441        memset(&rcb, 0, sizeof(rcb));
3442        memset(&asa, 0, sizeof(asa));
3443
3444        rcb.req_flags = req_flags;
3445        rcb.msi = SISL_MSI_RRQ_UPDATED;
3446        rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3447        rcb.ioasa = &asa;
3448
3449        if (ulen) {
3450                rcb.data_len = ulen;
3451                rcb.data_ea = (uintptr_t)kbuf;
3452        }
3453
3454        rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3455        memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3456               HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3457
3458        rc = send_afu_cmd(afu, &rcb);
3459        if (rc) {
3460                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3461                        __func__, rc, asa.ioasc, asa.afu_extra);
3462                goto out;
3463        }
3464
3465        if (ulen && !is_write) {
3466                if (copy_to_user(ubuf, kbuf, ulen))
3467                        rc = -EFAULT;
3468        }
3469out:
3470        kfree(buf);
3471        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3472        return rc;
3473}
3474
3475/**
3476 * cxlflash_chr_ioctl() - character device IOCTL handler
3477 * @file:       File pointer for this device.
3478 * @cmd:        IOCTL command.
3479 * @arg:        Userspace ioctl data structure.
3480 *
3481 * A read/write semaphore is used to implement a 'drain' of currently
3482 * running ioctls. The read semaphore is taken at the beginning of each
3483 * ioctl thread and released upon concluding execution. Additionally the
3484 * semaphore should be released and then reacquired in any ioctl execution
3485 * path which will wait for an event to occur that is outside the scope of
3486 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3487 * a thread simply needs to acquire the write semaphore.
3488 *
3489 * Return: 0 on success, -errno on failure
3490 */
3491static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3492                               unsigned long arg)
3493{
3494        typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3495
3496        struct cxlflash_cfg *cfg = file->private_data;
3497        struct device *dev = &cfg->dev->dev;
3498        char buf[sizeof(union cxlflash_ht_ioctls)];
3499        void __user *uarg = (void __user *)arg;
3500        struct ht_cxlflash_hdr *hdr;
3501        size_t size = 0;
3502        bool known_ioctl = false;
3503        int idx = 0;
3504        int rc = 0;
3505        hioctl do_ioctl = NULL;
3506
3507        static const struct {
3508                size_t size;
3509                hioctl ioctl;
3510        } ioctl_tbl[] = {       /* NOTE: order matters here */
3511        { sizeof(struct ht_cxlflash_lun_provision),
3512                (hioctl)cxlflash_lun_provision },
3513        { sizeof(struct ht_cxlflash_afu_debug),
3514                (hioctl)cxlflash_afu_debug },
3515        };
3516
3517        /* Hold read semaphore so we can drain if needed */
3518        down_read(&cfg->ioctl_rwsem);
3519
3520        dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3521                __func__, cmd, idx, sizeof(ioctl_tbl));
3522
3523        switch (cmd) {
3524        case HT_CXLFLASH_LUN_PROVISION:
3525        case HT_CXLFLASH_AFU_DEBUG:
3526                known_ioctl = true;
                idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3528                size = ioctl_tbl[idx].size;
3529                do_ioctl = ioctl_tbl[idx].ioctl;
3530
3531                if (likely(do_ioctl))
3532                        break;
3533
3534                fallthrough;
3535        default:
3536                rc = -EINVAL;
3537                goto out;
3538        }
3539
3540        if (unlikely(copy_from_user(&buf, uarg, size))) {
3541                dev_err(dev, "%s: copy_from_user() fail "
3542                        "size=%lu cmd=%d (%s) uarg=%p\n",
3543                        __func__, size, cmd, decode_hioctl(cmd), uarg);
3544                rc = -EFAULT;
3545                goto out;
3546        }
3547
3548        hdr = (struct ht_cxlflash_hdr *)&buf;
3549        if (hdr->version != HT_CXLFLASH_VERSION_0) {
3550                dev_dbg(dev, "%s: Version %u not supported for %s\n",
3551                        __func__, hdr->version, decode_hioctl(cmd));
3552                rc = -EINVAL;
3553                goto out;
3554        }
3555
3556        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3557                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3558                rc = -EINVAL;
3559                goto out;
3560        }
3561
3562        rc = do_ioctl(cfg, (void *)&buf);
3563        if (likely(!rc))
3564                if (unlikely(copy_to_user(uarg, &buf, size))) {
3565                        dev_err(dev, "%s: copy_to_user() fail "
3566                                "size=%lu cmd=%d (%s) uarg=%p\n",
3567                                __func__, size, cmd, decode_hioctl(cmd), uarg);
3568                        rc = -EFAULT;
3569                }
3570
3571        /* fall through to exit */
3572
3573out:
3574        up_read(&cfg->ioctl_rwsem);
3575        if (unlikely(rc && known_ioctl))
3576                dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3577                        __func__, decode_hioctl(cmd), cmd, rc);
3578        else
3579                dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3580                        __func__, decode_hioctl(cmd), cmd, rc);
3581        return rc;
3582}
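
/*
 * For reference, the drain described above reduces to momentarily taking
 * the semaphore for write (a sketch of the pattern; see drain_ioctls()
 * earlier in this file):
 *
 *   down_write(&cfg->ioctl_rwsem);
 *   up_write(&cfg->ioctl_rwsem);
 */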
3583
3584/*
3585 * Character device file operations
3586 */
3587static const struct file_operations cxlflash_chr_fops = {
3588        .owner          = THIS_MODULE,
3589        .open           = cxlflash_chr_open,
3590        .unlocked_ioctl = cxlflash_chr_ioctl,
3591        .compat_ioctl   = compat_ptr_ioctl,
3592};
3593
3594/**
3595 * init_chrdev() - initialize the character device for the host
3596 * @cfg:        Internal structure associated with the host.
3597 *
3598 * Return: 0 on success, -errno on failure
3599 */
3600static int init_chrdev(struct cxlflash_cfg *cfg)
3601{
3602        struct device *dev = &cfg->dev->dev;
3603        struct device *char_dev;
3604        dev_t devno;
3605        int minor;
3606        int rc = 0;
3607
3608        minor = cxlflash_get_minor();
3609        if (unlikely(minor < 0)) {
3610                dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3611                rc = -ENOSPC;
3612                goto out;
3613        }
3614
3615        devno = MKDEV(cxlflash_major, minor);
3616        cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3617
3618        rc = cdev_add(&cfg->cdev, devno, 1);
3619        if (rc) {
3620                dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3621                goto err1;
3622        }
3623
3624        char_dev = device_create(cxlflash_class, NULL, devno,
3625                                 NULL, "cxlflash%d", minor);
3626        if (IS_ERR(char_dev)) {
3627                rc = PTR_ERR(char_dev);
3628                dev_err(dev, "%s: device_create failed rc=%d\n",
3629                        __func__, rc);
3630                goto err2;
3631        }
3632
3633        cfg->chardev = char_dev;
3634out:
3635        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3636        return rc;
3637err2:
3638        cdev_del(&cfg->cdev);
3639err1:
3640        cxlflash_put_minor(minor);
3641        goto out;
3642}
3643
3644/**
3645 * cxlflash_probe() - PCI entry point to add host
3646 * @pdev:       PCI device associated with the host.
3647 * @dev_id:     PCI device id associated with device.
3648 *
3649 * The device will initially start out in a 'probing' state and
3650 * transition to the 'normal' state at the end of a successful
3651 * probe. Should an EEH event occur during probe, the notification
3652 * thread (error_detected()) will wait until the probe handler
3653 * is nearly complete. At that time, the device will be moved to
3654 * a 'probed' state and the EEH thread woken up to drive the slot
3655 * reset and recovery (device moves to 'normal' state). Meanwhile,
3656 * the probe will be allowed to exit successfully.
3657 *
3658 * Return: 0 on success, -errno on failure
3659 */
3660static int cxlflash_probe(struct pci_dev *pdev,
3661                          const struct pci_device_id *dev_id)
3662{
3663        struct Scsi_Host *host;
3664        struct cxlflash_cfg *cfg = NULL;
3665        struct device *dev = &pdev->dev;
3666        struct dev_dependent_vals *ddv;
3667        int rc = 0;
3668        int k;
3669
3670        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3671                __func__, pdev->irq);
3672
3673        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3674        driver_template.max_sectors = ddv->max_sectors;
3675
3676        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3677        if (!host) {
3678                dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3679                rc = -ENOMEM;
3680                goto out;
3681        }
3682
3683        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3684        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3685        host->unique_id = host->host_no;
3686        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3687
3688        cfg = shost_priv(host);
3689        cfg->state = STATE_PROBING;
3690        cfg->host = host;
3691        rc = alloc_mem(cfg);
3692        if (rc) {
3693                dev_err(dev, "%s: alloc_mem failed\n", __func__);
3694                rc = -ENOMEM;
3695                scsi_host_put(cfg->host);
3696                goto out;
3697        }
3698
3699        cfg->init_state = INIT_STATE_NONE;
3700        cfg->dev = pdev;
3701        cfg->cxl_fops = cxlflash_cxl_fops;
3702        cfg->ops = cxlflash_assign_ops(ddv);
3703        WARN_ON_ONCE(!cfg->ops);
3704
3705        /*
3706         * Promoted LUNs move to the top of the LUN table. The rest stay on
3707         * the bottom half. The bottom half grows from the end (index = 255),
3708         * whereas the top half grows from the beginning (index = 0).
3709         *
3710         * Initialize the last LUN index for all possible ports.
3711         */
3712        cfg->promote_lun_index = 0;
3713
3714        for (k = 0; k < MAX_FC_PORTS; k++)
3715                cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3716
3717        cfg->dev_id = (struct pci_device_id *)dev_id;
3718
3719        init_waitqueue_head(&cfg->tmf_waitq);
3720        init_waitqueue_head(&cfg->reset_waitq);
3721
3722        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3723        cfg->lr_state = LINK_RESET_INVALID;
3724        cfg->lr_port = -1;
3725        spin_lock_init(&cfg->tmf_slock);
3726        mutex_init(&cfg->ctx_tbl_list_mutex);
3727        mutex_init(&cfg->ctx_recovery_mutex);
3728        init_rwsem(&cfg->ioctl_rwsem);
3729        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3730        INIT_LIST_HEAD(&cfg->lluns);
3731
3732        pci_set_drvdata(pdev, cfg);
3733
3734        rc = init_pci(cfg);
3735        if (rc) {
3736                dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3737                goto out_remove;
3738        }
3739        cfg->init_state = INIT_STATE_PCI;
3740
3741        cfg->afu_cookie = cfg->ops->create_afu(pdev);
3742        if (unlikely(!cfg->afu_cookie)) {
3743                dev_err(dev, "%s: create_afu failed\n", __func__);
3744                rc = -ENOMEM;
3745                goto out_remove;
3746        }
3747
3748        rc = init_afu(cfg);
3749        if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3750                dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3751                goto out_remove;
3752        }
3753        cfg->init_state = INIT_STATE_AFU;
3754
3755        rc = init_scsi(cfg);
3756        if (rc) {
3757                dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3758                goto out_remove;
3759        }
3760        cfg->init_state = INIT_STATE_SCSI;
3761
3762        rc = init_chrdev(cfg);
3763        if (rc) {
3764                dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3765                goto out_remove;
3766        }
3767        cfg->init_state = INIT_STATE_CDEV;
3768
3769        if (wq_has_sleeper(&cfg->reset_waitq)) {
3770                cfg->state = STATE_PROBED;
3771                wake_up_all(&cfg->reset_waitq);
3772        } else
3773                cfg->state = STATE_NORMAL;
3774out:
3775        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3776        return rc;
3777
3778out_remove:
3779        cfg->state = STATE_PROBED;
3780        cxlflash_remove(pdev);
3781        goto out;
3782}
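
/*
 * Probe state transitions, summarizing the description above (a sketch):
 *
 *   STATE_PROBING --success--------------------> STATE_NORMAL
 *   STATE_PROBING --EEH during probe-----------> STATE_PROBED
 *       (EEH handlers below drive slot reset)--> STATE_NORMAL
 *   STATE_PROBING --failure--------------------> STATE_PROBED --> remove
 */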
3783
3784/**
3785 * cxlflash_pci_error_detected() - called when a PCI error is detected
3786 * @pdev:       PCI device struct.
3787 * @state:      PCI channel state.
3788 *
3789 * When an EEH occurs during an active reset, wait until the reset is
3790 * complete and then take action based upon the device state.
3791 *
3792 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3793 */
3794static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3795                                                    pci_channel_state_t state)
3796{
3797        int rc = 0;
3798        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3799        struct device *dev = &cfg->dev->dev;
3800
3801        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3802
3803        switch (state) {
3804        case pci_channel_io_frozen:
3805                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3806                                             cfg->state != STATE_PROBING);
3807                if (cfg->state == STATE_FAILTERM)
3808                        return PCI_ERS_RESULT_DISCONNECT;
3809
3810                cfg->state = STATE_RESET;
3811                scsi_block_requests(cfg->host);
3812                drain_ioctls(cfg);
3813                rc = cxlflash_mark_contexts_error(cfg);
3814                if (unlikely(rc))
3815                        dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3816                                __func__, rc);
3817                term_afu(cfg);
3818                return PCI_ERS_RESULT_NEED_RESET;
3819        case pci_channel_io_perm_failure:
3820                cfg->state = STATE_FAILTERM;
3821                wake_up_all(&cfg->reset_waitq);
3822                scsi_unblock_requests(cfg->host);
3823                return PCI_ERS_RESULT_DISCONNECT;
3824        default:
3825                break;
3826        }
3827        return PCI_ERS_RESULT_NEED_RESET;
3828}
3829
3830/**
3831 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3832 * @pdev:       PCI device struct.
3833 *
3834 * This routine is called by the pci error recovery code after the PCI
3835 * slot has been reset, just before we should resume normal operations.
3836 *
3837 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3838 */
3839static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3840{
3841        int rc = 0;
3842        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3843        struct device *dev = &cfg->dev->dev;
3844
3845        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3846
3847        rc = init_afu(cfg);
3848        if (unlikely(rc)) {
3849                dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3850                return PCI_ERS_RESULT_DISCONNECT;
3851        }
3852
3853        return PCI_ERS_RESULT_RECOVERED;
3854}
3855
3856/**
3857 * cxlflash_pci_resume() - called when normal operation can resume
3858 * @pdev:       PCI device struct
3859 */
3860static void cxlflash_pci_resume(struct pci_dev *pdev)
3861{
3862        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3863        struct device *dev = &cfg->dev->dev;
3864
3865        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3866
3867        cfg->state = STATE_NORMAL;
3868        wake_up_all(&cfg->reset_waitq);
3869        scsi_unblock_requests(cfg->host);
3870}
3871
3872/**
3873 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3874 * @dev:        Character device.
3875 * @mode:       Mode that can be used to verify access.
3876 *
3877 * Return: Allocated string describing the devtmpfs structure.
3878 */
3879static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3880{
3881        return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3882}
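
/*
 * Example (a sketch; the minor number depends on adapter discovery order):
 *
 *   $ ls /dev/cxlflash
 *   cxlflash0
 */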
3883
3884/**
3885 * cxlflash_class_init() - create character device class
3886 *
3887 * Return: 0 on success, -errno on failure
3888 */
3889static int cxlflash_class_init(void)
3890{
3891        dev_t devno;
3892        int rc = 0;
3893
3894        rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3895        if (unlikely(rc)) {
3896                pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3897                goto out;
3898        }
3899
3900        cxlflash_major = MAJOR(devno);
3901
3902        cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3903        if (IS_ERR(cxlflash_class)) {
3904                rc = PTR_ERR(cxlflash_class);
3905                pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3906                goto err;
3907        }
3908
3909        cxlflash_class->devnode = cxlflash_devnode;
3910out:
3911        pr_debug("%s: returning rc=%d\n", __func__, rc);
3912        return rc;
3913err:
3914        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3915        goto out;
3916}
3917
3918/**
3919 * cxlflash_class_exit() - destroy character device class
3920 */
3921static void cxlflash_class_exit(void)
3922{
3923        dev_t devno = MKDEV(cxlflash_major, 0);
3924
3925        class_destroy(cxlflash_class);
3926        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3927}
3928
3929static const struct pci_error_handlers cxlflash_err_handler = {
3930        .error_detected = cxlflash_pci_error_detected,
3931        .slot_reset = cxlflash_pci_slot_reset,
3932        .resume = cxlflash_pci_resume,
3933};
3934
3935/*
3936 * PCI device structure
3937 */
3938static struct pci_driver cxlflash_driver = {
3939        .name = CXLFLASH_NAME,
3940        .id_table = cxlflash_pci_table,
3941        .probe = cxlflash_probe,
3942        .remove = cxlflash_remove,
3943        .shutdown = cxlflash_remove,
3944        .err_handler = &cxlflash_err_handler,
3945};
3946
3947/**
3948 * init_cxlflash() - module entry point
3949 *
3950 * Return: 0 on success, -errno on failure
3951 */
3952static int __init init_cxlflash(void)
3953{
3954        int rc;
3955
3956        check_sizes();
3957        cxlflash_list_init();
3958        rc = cxlflash_class_init();
3959        if (unlikely(rc))
3960                goto out;
3961
3962        rc = pci_register_driver(&cxlflash_driver);
3963        if (unlikely(rc))
3964                goto err;
3965out:
3966        pr_debug("%s: returning rc=%d\n", __func__, rc);
3967        return rc;
3968err:
3969        cxlflash_class_exit();
3970        goto out;
3971}
3972
3973/**
3974 * exit_cxlflash() - module exit point
3975 */
3976static void __exit exit_cxlflash(void)
3977{
3978        cxlflash_term_global_luns();
3979        cxlflash_free_errpage();
3980
3981        pci_unregister_driver(&cxlflash_driver);
3982        cxlflash_class_exit();
3983}
3984
3985module_init(init_cxlflash);
3986module_exit(exit_cxlflash);
3987