linux/drivers/scsi/cxlflash/main.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:        AFU command that experienced the error.
 * @scp:        SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else {
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
		}
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere. If
				 * not, it must be handled here. This is
				 * probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:        AFU command that has completed.
 *
 * For SCSI commands this routine hands the completed or timed-out command
 * back to the SCSI stack. For internal commands (TMF or AFU), this routine
 * simply notifies the originator that the command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

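	/* Remove the command from the hardware queue's pending list */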
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else {
		complete(&cmd->cevent);
	}
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:        Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
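			/* Let the midlayer retry commands the AFU dropped */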
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else {
				complete(&cmd->cevent);
			}
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:        Hardware queue owning the context to be reset.
 * @reset_reg:  MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
		__func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

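	/*
	 * Copy the IOARCB into the current SQ slot, then advance the pointer,
	 * wrapping back to the start of the ring when the end is reached.
	 */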
	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
	       readq_be(&hwq->host_map->sq_head),
	       readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
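	/* The IOARCB timeout is in seconds; allow twice that for margin */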
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 * @afu:        AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:        Internal structure associated with the host.
 * @sdev:       SCSI device destined for TMF.
 * @tmfcmd:     TMF command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

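	/* Over-allocate so the command can sit on its natural alignment */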
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:       SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

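	/*
	 * Only the first scatter-gather element is used; the host template
	 * is expected to limit each command to a single element.
	 */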
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:        Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:        Internal structure associated with the host.
 * @level:      Depth of allocation, where to begin waterfall tear down.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:        Internal structure associated with the host.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:        Internal structure associated with the host.
 * @wait:       Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

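	/* Defensively constrain the bit index to the valid minor range */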
	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:      Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:       PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		/* fall through */
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:        Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *      0 on success
 *      -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *      TRUE (1) when the specified port is online
 *      FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
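		/* All-ones reads suggest MMIO is unavailable; shorten retries */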
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *      TRUE (1) when the specified port is offline
 *      FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
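		/* All-ones reads suggest MMIO is unavailable; shorten retries */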
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @wwpn:       The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:        AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/*
	 * Unmask bits that are of interest. Note that the AFU can send an
	 * interrupt after this step.
	 */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/*
	 * Clear again in case a bit came on after the previous clear but
	 * before the unmask.
	 */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/*
	 * Sync interrupts for the master's IOARRIN write. Note that unlike
	 * asyncs, there can be no pending sync interrupts at this time (this
	 * is a fresh context and the master has not written IOARRIN yet), so
	 * there is nothing to clear.
	 */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:        Hardware queue owning the RRQ to process.
 * @doneq:      Queue of commands harvested from the RRQ.
 * @budget:     Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

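		/* The entry holds the address of the IOASA (SQ mode) or IOARCB */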
		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:      Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - processes a batch of RRQ entries in polled mode
 * @irqpoll:    IRQ poll structure associated with queue to poll.
 * @budget:     Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

1507/*
1508 * Asynchronous interrupt information table
1509 *
1510 * NOTE:
1511 *      - Order matters here as this array is indexed by bit position.
1512 *
1513 *      - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1514 *        as complex and complains due to a lack of parentheses/braces.
1515 */
1516#define ASTATUS_FC(_a, _b, _c, _d)                                       \
1517        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1518
1519#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
1520        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
1521        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
1522        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
1523        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
1524        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1525        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
1526        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
1527        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1528
1529static const struct asyc_intr_info ainfo[] = {
1530        BUILD_SISL_ASTATUS_FC_PORT(1),
1531        BUILD_SISL_ASTATUS_FC_PORT(0),
1532        BUILD_SISL_ASTATUS_FC_PORT(3),
1533        BUILD_SISL_ASTATUS_FC_PORT(2)
1534};
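
/*
 * The seemingly shuffled port order above (1, 0, 3, 2) mirrors the bit
 * layout of the asynchronous interrupt status register, so that set bit
 * N in aintr_status indexes directly to ainfo[N]. The info->status
 * sanity check in cxlflash_async_err_irq() below guards against this
 * table drifting out of sync with the register definition.
 */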
1535
1536/**
1537 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1538 * @irq:        Interrupt number.
1539 * @data:       Private data provided at interrupt registration, the AFU.
1540 *
1541 * Return: Always returns IRQ_HANDLED.
1542 */
1543static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1544{
1545        struct hwq *hwq = (struct hwq *)data;
1546        struct afu *afu = hwq->afu;
1547        struct cxlflash_cfg *cfg = afu->parent;
1548        struct device *dev = &cfg->dev->dev;
1549        const struct asyc_intr_info *info;
1550        struct sisl_global_map __iomem *global = &afu->afu_map->global;
1551        __be64 __iomem *fc_port_regs;
1552        u64 reg_unmasked;
1553        u64 reg;
1554        u64 bit;
1555        u8 port;
1556
1557        reg = readq_be(&global->regs.aintr_status);
1558        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1559
1560        if (unlikely(reg_unmasked == 0)) {
1561                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1562                        __func__, reg);
1563                goto out;
1564        }
1565
1566        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1567        writeq_be(reg_unmasked, &global->regs.aintr_clear);
1568
1569        /* Check each bit that is on */
1570        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1571                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1572                        WARN_ON_ONCE(1);
1573                        continue;
1574                }
1575
1576                info = &ainfo[bit];
1577                if (unlikely(info->status != 1ULL << bit)) {
1578                        WARN_ON_ONCE(1);
1579                        continue;
1580                }
1581
1582                port = info->port;
1583                fc_port_regs = get_fc_port_regs(cfg, port);
1584
1585                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1586                        __func__, port, info->desc,
1587                       readq_be(&fc_port_regs[FC_STATUS / 8]));
1588
1589                /*
1590                 * Do link reset first, some OTHER errors will set FC_ERROR
1591                 * again if cleared before or w/o a reset
1592                 */
1593                if (info->action & LINK_RESET) {
1594                        dev_err(dev, "%s: FC Port %d: resetting link\n",
1595                                __func__, port);
1596                        cfg->lr_state = LINK_RESET_REQUIRED;
1597                        cfg->lr_port = port;
1598                        schedule_work(&cfg->work_q);
1599                }
1600
1601                if (info->action & CLR_FC_ERROR) {
1602                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1603
1604                        /*
1605                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1606                         * should be the same and tracing one is sufficient.
1607                         */
1608
1609                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1610                                __func__, port, reg);
1611
1612                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1613                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1614                }
1615
1616                if (info->action & SCAN_HOST) {
1617                        atomic_inc(&cfg->scan_host_needed);
1618                        schedule_work(&cfg->work_q);
1619                }
1620        }
1621
1622out:
1623        return IRQ_HANDLED;
1624}
1625
1626/**
1627 * read_vpd() - obtains the WWPNs from VPD
1628 * @cfg:        Internal structure associated with the host.
1629 * @wwpn:       Array of size MAX_FC_PORTS to pass back WWPNs
1630 *
1631 * Return: 0 on success, -errno on failure
1632 */
1633static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1634{
1635        struct device *dev = &cfg->dev->dev;
1636        struct pci_dev *pdev = cfg->dev;
1637        int rc = 0;
1638        int ro_start, ro_size, i, j, k;
1639        ssize_t vpd_size;
1640        char vpd_data[CXLFLASH_VPD_LEN];
1641        char tmp_buf[WWPN_BUF_LEN] = { 0 };
1642        const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1643                                                cfg->dev_id->driver_data;
1644        const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1645        const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1646
1647        /* Get the VPD data from the device */
1648        vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1649        if (unlikely(vpd_size <= 0)) {
1650                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1651                        __func__, vpd_size);
1652                rc = -ENODEV;
1653                goto out;
1654        }
1655
1656        /* Get the read only section offset */
1657        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1658                                    PCI_VPD_LRDT_RO_DATA);
1659        if (unlikely(ro_start < 0)) {
1660                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1661                rc = -ENODEV;
1662                goto out;
1663        }
1664
1665        /* Get the read only section size, cap when extends beyond read VPD */
1666        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1667        j = ro_size;
1668        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1669        if (unlikely((i + j) > vpd_size)) {
1670                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1671                        __func__, (i + j), vpd_size);
1672                ro_size = vpd_size - i;
1673        }
1674
1675        /*
1676         * Find the offset of the WWPN tag within the read only
1677         * VPD data and validate the found field (partials are
1678         * no good to us). Convert the ASCII data to an integer
1679         * value. Note that we must copy to a temporary buffer
1680         * because the conversion service requires that the ASCII
1681         * string be terminated.
1682         *
1683         * Allow for WWPN not being found for all devices, setting
1684         * the returned WWPN to zero when not found. Notify with a
1685         * log error for cards that should have had WWPN keywords
1686         * in the VPD - cards requiring WWPN will not have their
1687         * ports programmed and operate in an undefined state.
1688         */
1689        for (k = 0; k < cfg->num_fc_ports; k++) {
1690                j = ro_size;
1691                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1692
1693                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1694                if (i < 0) {
1695                        if (wwpn_vpd_required)
1696                                dev_err(dev, "%s: Port %d WWPN not found\n",
1697                                        __func__, k);
1698                        wwpn[k] = 0ULL;
1699                        continue;
1700                }
1701
1702                j = pci_vpd_info_field_size(&vpd_data[i]);
1703                i += PCI_VPD_INFO_FLD_HDR_SIZE;
1704                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1705                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1706                                __func__, k);
1707                        rc = -ENODEV;
1708                        goto out;
1709                }
1710
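                /*
                 * WWPN_LEN (16) doubles as the numeric base below, so the
                 * 16-character ASCII field is parsed as hexadecimal; a
                 * field reading "500507680b21ac3a" (illustrative value
                 * only) would yield 0x500507680b21ac3a.
                 */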
1711                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1712                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1713                if (unlikely(rc)) {
1714                        dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1715                                __func__, k);
1716                        rc = -ENODEV;
1717                        goto out;
1718                }
1719
1720                dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1721        }
1722
1723out:
1724        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1725        return rc;
1726}
1727
1728/**
1729 * init_pcr() - initialize the provisioning and control registers
1730 * @cfg:        Internal structure associated with the host.
1731 *
1732 * Also sets up fast access to the mapped registers and initializes AFU
1733 * command fields that never change.
1734 */
1735static void init_pcr(struct cxlflash_cfg *cfg)
1736{
1737        struct afu *afu = cfg->afu;
1738        struct sisl_ctrl_map __iomem *ctrl_map;
1739        struct hwq *hwq;
1740        void *cookie;
1741        int i;
1742
1743        for (i = 0; i < MAX_CONTEXT; i++) {
1744                ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1745                /* Disrupt any clients that could be running */
1746                /* e.g. clients that survived a master restart */
1747                writeq_be(0, &ctrl_map->rht_start);
1748                writeq_be(0, &ctrl_map->rht_cnt_id);
1749                writeq_be(0, &ctrl_map->ctx_cap);
1750        }
1751
1752        /* Copy frequently used fields into hwq */
1753        for (i = 0; i < afu->num_hwqs; i++) {
1754                hwq = get_hwq(afu, i);
1755                cookie = hwq->ctx_cookie;
1756
1757                hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1758                hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1759                hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1760
1761                /* Program the Endian Control for the master context */
1762                writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1763        }
1764}
1765
1766/**
1767 * init_global() - initialize AFU global registers
1768 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
1769 */
1770static int init_global(struct cxlflash_cfg *cfg)
1771{
1772        struct afu *afu = cfg->afu;
1773        struct device *dev = &cfg->dev->dev;
1774        struct hwq *hwq;
1775        struct sisl_host_map __iomem *hmap;
1776        __be64 __iomem *fc_port_regs;
1777        u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1778        int i = 0, num_ports = 0;
1779        int rc = 0;
1780        int j;
1781        void *ctx;
1782        u64 reg;
1783
1784        rc = read_vpd(cfg, &wwpn[0]);
1785        if (rc) {
1786                dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1787                goto out;
1788        }
1789
1790        /* Set up RRQ and SQ in HWQ for master issued cmds */
1791        for (i = 0; i < afu->num_hwqs; i++) {
1792                hwq = get_hwq(afu, i);
1793                hmap = hwq->host_map;
1794
1795                writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1796                writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1797                hwq->hrrq_online = true;
1798
1799                if (afu_is_sq_cmd_mode(afu)) {
1800                        writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1801                        writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1802                }
1803        }
1804
1805        /* AFU configuration */
1806        reg = readq_be(&afu->afu_map->global.regs.afu_config);
1807        reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
1808        /*
1809         * Enable all auto retry options and control endianness. Leave
1810         * the others at their defaults: CTX_CAP write protected, mbox_r
1811         * does not clear on read, and checker on if dual AFU.
         */
1812        writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1813
1814        /* Global port select: select either port */
1815        if (afu->internal_lun) {
1816                /* Only use port 0 */
1817                writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1818                num_ports = 0;
1819        } else {
1820                writeq_be(PORT_MASK(cfg->num_fc_ports),
1821                          &afu->afu_map->global.regs.afu_port_sel);
1822                num_ports = cfg->num_fc_ports;
1823        }
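
        /*
         * PORT_MASK(n) selects ports 0 through n-1 by setting the low n
         * bits, e.g. a 2-port card programs 0x3 and a 4-port card 0xf
         * into afu_port_sel; get_num_afu_ports() performs the inverse
         * conversion when reading the mask back.
         */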
1824
1825        for (i = 0; i < num_ports; i++) {
1826                fc_port_regs = get_fc_port_regs(cfg, i);
1827
1828                /* Unmask all errors (but they are still masked at AFU) */
1829                writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1830                /* Clear CRC error cnt & set a threshold */
1831                (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1832                writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1833
1834                /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1835                if (wwpn[i] != 0)
1836                        afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1837        /*
1838         * Programming WWPN back to back causes additional
1839         * offline/online transitions and a PLOGI.
         */
1840                msleep(100);
1841        }
1842
1843        if (afu_is_ocxl_lisn(afu)) {
1844                /* Set up the LISN effective address for each master */
1845                for (i = 0; i < afu->num_hwqs; i++) {
1846                        hwq = get_hwq(afu, i);
1847                        ctx = hwq->ctx_cookie;
1848
1849                        for (j = 0; j < hwq->num_irqs; j++) {
1850                                reg = cfg->ops->get_irq_objhndl(ctx, j);
1851                                writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1852                        }
1853
1854                        reg = hwq->ctx_hndl;
1855                        writeq_be(SISL_LISN_PASID(reg, reg),
1856                                  &hwq->ctrl_map->lisn_pasid[0]);
1857                        writeq_be(SISL_LISN_PASID(0UL, reg),
1858                                  &hwq->ctrl_map->lisn_pasid[1]);
1859                }
1860        }
1861
1862        /*
1863         * Set up the master's own CTX_CAP to allow real mode, host
1864         * translation tables, AFU cmds and read/write GSCSI cmds.
         * First, unlock the ctx_cap write by reading the mbox.
         */
1865        for (i = 0; i < afu->num_hwqs; i++) {
1866                hwq = get_hwq(afu, i);
1867
1868                (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1869                writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1870                        SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1871                        SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1872                        &hwq->ctrl_map->ctx_cap);
1873        }
1874
1875        /*
1876         * Determine write-same unmap support for host by evaluating the unmap
1877         * sector support bit of the context control register associated with
1878         * the primary hardware queue. Note that while this status is reflected
1879         * in a context register, the outcome can be assumed to be host-wide.
1880         */
1881        hwq = get_hwq(afu, PRIMARY_HWQ);
1882        reg = readq_be(&hwq->host_map->ctx_ctrl);
1883        if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1884                cfg->ws_unmap = true;
1885
1886        /* Initialize heartbeat */
1887        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1888out:
1889        return rc;
1890}
1891
1892/**
1893 * start_afu() - initializes and starts the AFU
1894 * @cfg:        Internal structure associated with the host.
1895 */
1896static int start_afu(struct cxlflash_cfg *cfg)
1897{
1898        struct afu *afu = cfg->afu;
1899        struct device *dev = &cfg->dev->dev;
1900        struct hwq *hwq;
1901        int rc = 0;
1902        int i;
1903
1904        init_pcr(cfg);
1905
1906        /* Initialize each HWQ */
1907        for (i = 0; i < afu->num_hwqs; i++) {
1908                hwq = get_hwq(afu, i);
1909
1910                /* After an AFU reset, RRQ entries are stale, clear them */
1911                memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1912
1913                /* Initialize RRQ pointers */
1914                hwq->hrrq_start = &hwq->rrq_entry[0];
1915                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1916                hwq->hrrq_curr = hwq->hrrq_start;
1917                hwq->toggle = 1;
1918
1919                /* Initialize spin locks */
1920                spin_lock_init(&hwq->hrrq_slock);
1921                spin_lock_init(&hwq->hsq_slock);
1922
1923                /* Initialize SQ */
1924                if (afu_is_sq_cmd_mode(afu)) {
1925                        memset(&hwq->sq, 0, sizeof(hwq->sq));
1926                        hwq->hsq_start = &hwq->sq[0];
1927                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1928                        hwq->hsq_curr = hwq->hsq_start;
1929
1930                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1931                }
1932
1933                /* Initialize IRQ poll */
1934                if (afu_is_irqpoll_enabled(afu))
1935                        irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1936                                      cxlflash_irqpoll);
1937
1938        }
1939
1940        rc = init_global(cfg);
1941
1942        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1943        return rc;
1944}
1945
1946/**
1947 * init_intr() - setup interrupt handlers for the master context
1948 * @cfg:        Internal structure associated with the host.
1949 * @hwq:        Hardware queue to initialize.
1950 *
1951 * Return: UNDO_NOOP on success, the undo level required to unwind on failure
1952 */
1953static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1954                                 struct hwq *hwq)
1955{
1956        struct device *dev = &cfg->dev->dev;
1957        void *ctx = hwq->ctx_cookie;
1958        int rc = 0;
1959        enum undo_level level = UNDO_NOOP;
1960        bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1961        int num_irqs = hwq->num_irqs;
1962
1963        rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1964        if (unlikely(rc)) {
1965                dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1966                        __func__, rc);
1967                level = UNDO_NOOP;
1968                goto out;
1969        }
1970
1971        rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1972                                   "SISL_MSI_SYNC_ERROR");
1973        if (unlikely(rc <= 0)) {
1974                dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1975                level = FREE_IRQ;
1976                goto out;
1977        }
1978
1979        rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1980                                   "SISL_MSI_RRQ_UPDATED");
1981        if (unlikely(rc <= 0)) {
1982                dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1983                level = UNMAP_ONE;
1984                goto out;
1985        }
1986
1987        /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1988        if (!is_primary_hwq)
1989                goto out;
1990
1991        rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1992                                   "SISL_MSI_ASYNC_ERROR");
1993        if (unlikely(rc <= 0)) {
1994                dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1995                level = UNMAP_TWO;
1996                goto out;
1997        }
1998out:
1999        return level;
2000}
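
/*
 * The undo_level returned above records how far interrupt setup
 * progressed so that term_intr() can unwind exactly the steps that
 * completed: FREE_IRQ releases the bare allocation, while UNMAP_ONE,
 * UNMAP_TWO and UNMAP_THREE additionally unmap that many IRQs first.
 */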
2001
2002/**
2003 * init_mc() - create and register as the master context
2004 * @cfg:        Internal structure associated with the host.
2005 * @index:      HWQ Index of the master context.
2006 *
2007 * Return: 0 on success, -errno on failure
2008 */
2009static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2010{
2011        void *ctx;
2012        struct device *dev = &cfg->dev->dev;
2013        struct hwq *hwq = get_hwq(cfg->afu, index);
2014        int rc = 0;
2015        int num_irqs;
2016        enum undo_level level;
2017
2018        hwq->afu = cfg->afu;
2019        hwq->index = index;
2020        INIT_LIST_HEAD(&hwq->pending_cmds);
2021
2022        if (index == PRIMARY_HWQ) {
2023                ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2024                num_irqs = 3;
2025        } else {
2026                ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2027                num_irqs = 2;
2028        }
2029        if (IS_ERR_OR_NULL(ctx)) {
2030                rc = -ENOMEM;
2031                goto err1;
2032        }
2033
2034        WARN_ON(hwq->ctx_cookie);
2035        hwq->ctx_cookie = ctx;
2036        hwq->num_irqs = num_irqs;
2037
2038        /* Set it up as a master with the CXL */
2039        cfg->ops->set_master(ctx);
2040
2041        /* Reset AFU when initializing primary context */
2042        if (index == PRIMARY_HWQ) {
2043                rc = cfg->ops->afu_reset(ctx);
2044                if (unlikely(rc)) {
2045                        dev_err(dev, "%s: AFU reset failed rc=%d\n",
2046                                      __func__, rc);
2047                        goto err1;
2048                }
2049        }
2050
2051        level = init_intr(cfg, hwq);
2052        if (unlikely(level)) {
2053                dev_err(dev, "%s: interrupt init failed level=%d\n",
                        __func__, level);
                rc = -ENODEV; /* rc may still be 0 here; make the failure visible */
2054                goto err2;
2055        }
2056
2057        /* Finally, activate the context by starting it */
2058        rc = cfg->ops->start_context(hwq->ctx_cookie);
2059        if (unlikely(rc)) {
2060                dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2061                level = UNMAP_THREE;
2062                goto err2;
2063        }
2064
2065out:
2066        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2067        return rc;
2068err2:
2069        term_intr(cfg, level, index);
2070        if (index != PRIMARY_HWQ)
2071                cfg->ops->release_context(ctx);
2072err1:
2073        hwq->ctx_cookie = NULL;
2074        goto out;
2075}
2076
2077/**
2078 * get_num_afu_ports() - determines and configures the number of AFU ports
2079 * @cfg:        Internal structure associated with the host.
2080 *
2081 * This routine determines the number of AFU ports by converting the global
2082 * port selection mask. The converted value is only valid following an AFU
2083 * reset (explicit or power-on). This routine must be invoked shortly after
2084 * mapping as other routines are dependent on the number of ports during the
2085 * initialization sequence.
2086 *
2087 * To support legacy AFUs that might not have reflected an initial global
2088 * port mask (value read is 0), default to the number of ports originally
2089 * supported by the cxlflash driver (2) before hardware with other port
2090 * offerings was introduced.
2091 */
2092static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2093{
2094        struct afu *afu = cfg->afu;
2095        struct device *dev = &cfg->dev->dev;
2096        u64 port_mask;
2097        int num_fc_ports = LEGACY_FC_PORTS;
2098
2099        port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2100        if (port_mask != 0ULL)
2101                num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2102
2103        dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2104                __func__, port_mask, num_fc_ports);
2105
2106        cfg->num_fc_ports = num_fc_ports;
2107        cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2108}
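
/*
 * Worked example of the conversion above: a 4-port card programmed with
 * PORT_MASK(4) reads back port_mask = 0xf, giving ilog2(0xf) + 1 =
 * 3 + 1 = 4 ports, while a legacy AFU reading back 0 falls back to the
 * 2 LEGACY_FC_PORTS.
 */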
2109
2110/**
2111 * init_afu() - setup as master context and start AFU
2112 * @cfg:        Internal structure associated with the host.
2113 *
2114 * This routine is a higher level of control for configuring the
2115 * AFU on probe and reset paths.
2116 *
2117 * Return: 0 on success, -errno on failure
2118 */
2119static int init_afu(struct cxlflash_cfg *cfg)
2120{
2121        u64 reg;
2122        int rc = 0;
2123        struct afu *afu = cfg->afu;
2124        struct device *dev = &cfg->dev->dev;
2125        struct hwq *hwq;
2126        int i;
2127
2128        cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2129
2130        mutex_init(&afu->sync_active);
2131        afu->num_hwqs = afu->desired_hwqs;
2132        for (i = 0; i < afu->num_hwqs; i++) {
2133                rc = init_mc(cfg, i);
2134                if (rc) {
2135                        dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2136                                __func__, rc, i);
2137                        goto err1;
2138                }
2139        }
2140
2141        /* Map the entire MMIO space of the AFU using the first context */
2142        hwq = get_hwq(afu, PRIMARY_HWQ);
2143        afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2144        if (!afu->afu_map) {
2145                dev_err(dev, "%s: psa_map failed\n", __func__);
2146                rc = -ENOMEM;
2147                goto err1;
2148        }
2149
2150        /* No byte reverse on reading afu_version, or the string will be backwards */
2151        reg = readq(&afu->afu_map->global.regs.afu_version);
2152        memcpy(afu->version, &reg, sizeof(reg));
2153        afu->interface_version =
2154            readq_be(&afu->afu_map->global.regs.interface_version);
2155        if ((afu->interface_version + 1) == 0) {
2156                dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2157                        "interface version %016llx\n", afu->version,
2158                       afu->interface_version);
2159                rc = -EINVAL;
2160                goto err1;
2161        }
2162
2163        if (afu_is_sq_cmd_mode(afu)) {
2164                afu->send_cmd = send_cmd_sq;
2165                afu->context_reset = context_reset_sq;
2166        } else {
2167                afu->send_cmd = send_cmd_ioarrin;
2168                afu->context_reset = context_reset_ioarrin;
2169        }
2170
2171        dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2172                afu->version, afu->interface_version);
2173
2174        get_num_afu_ports(cfg);
2175
2176        rc = start_afu(cfg);
2177        if (rc) {
2178                dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2179                goto err1;
2180        }
2181
2182        afu_err_intr_init(cfg->afu);
2183        for (i = 0; i < afu->num_hwqs; i++) {
2184                hwq = get_hwq(afu, i);
2185
2186                hwq->room = readq_be(&hwq->host_map->cmd_room);
2187        }
2188
2189        /* Restore the LUN mappings */
2190        cxlflash_restore_luntable(cfg);
2191out:
2192        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2193        return rc;
2194
2195err1:
2196        for (i = afu->num_hwqs - 1; i >= 0; i--) {
2197                term_intr(cfg, UNMAP_THREE, i);
2198                term_mc(cfg, i);
2199        }
2200        goto out;
2201}
2202
2203/**
2204 * afu_reset() - resets the AFU
2205 * @cfg:        Internal structure associated with the host.
2206 *
2207 * Return: 0 on success, -errno on failure
2208 */
2209static int afu_reset(struct cxlflash_cfg *cfg)
2210{
2211        struct device *dev = &cfg->dev->dev;
2212        int rc = 0;
2213
2214        /*
2215         * Stop the context before the reset. Since the context is
2216         * no longer available, restart it after the reset is complete.
         */
2217        term_afu(cfg);
2218
2219        rc = init_afu(cfg);
2220
2221        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2222        return rc;
2223}
2224
2225/**
2226 * drain_ioctls() - wait until all currently executing ioctls have completed
2227 * @cfg:        Internal structure associated with the host.
2228 *
2229 * Obtain write access to read/write semaphore that wraps ioctl
2230 * handling to 'drain' ioctls currently executing.
2231 */
2232static void drain_ioctls(struct cxlflash_cfg *cfg)
2233{
2234        down_write(&cfg->ioctl_rwsem);
2235        up_write(&cfg->ioctl_rwsem);
2236}
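
/*
 * This is the classic rwsem drain idiom: every ioctl path holds
 * ioctl_rwsem for read, so the momentary write acquisition above cannot
 * succeed until all in-flight ioctls have released their read side, and
 * new ioctls queue behind the writer. Nothing is held on return.
 */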
2237
2238/**
2239 * cxlflash_async_reset_host() - asynchronous host reset handler
2240 * @data:       Private data provided while scheduling reset.
2241 * @cookie:     Cookie that can be used for checkpointing.
2242 */
2243static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2244{
2245        struct cxlflash_cfg *cfg = data;
2246        struct device *dev = &cfg->dev->dev;
2247        int rc = 0;
2248
2249        if (cfg->state != STATE_RESET) {
2250                dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2251                        __func__, cfg->state);
2252                goto out;
2253        }
2254
2255        drain_ioctls(cfg);
2256        cxlflash_mark_contexts_error(cfg);
2257        rc = afu_reset(cfg);
2258        if (rc)
2259                cfg->state = STATE_FAILTERM;
2260        else
2261                cfg->state = STATE_NORMAL;
2262        wake_up_all(&cfg->reset_waitq);
2263
2264out:
2265        scsi_unblock_requests(cfg->host);
2266}
2267
2268/**
2269 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2270 * @cfg:        Internal structure associated with the host.
2271 */
2272static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2273{
2274        struct device *dev = &cfg->dev->dev;
2275
2276        if (cfg->state != STATE_NORMAL) {
2277                dev_dbg(dev, "%s: Not performing reset state=%d\n",
2278                        __func__, cfg->state);
2279                return;
2280        }
2281
2282        cfg->state = STATE_RESET;
2283        scsi_block_requests(cfg->host);
2284        cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2285                                                 cfg);
2286}
2287
2288/**
2289 * send_afu_cmd() - builds and sends an internal AFU command
2290 * @afu:        AFU associated with the host.
2291 * @rcb:        Pre-populated IOARCB describing command to send.
2292 *
2293 * The AFU can only take one internal AFU command at a time. This limitation is
2294 * enforced by using a mutex to provide exclusive access to the AFU during the
2295 * operation. This design point requires that calling threads not be in interrupt
2296 * context due to the possibility of sleeping during concurrent AFU operations.
2297 *
2298 * The command status is optionally passed back to the caller when the caller
2299 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2300 *
2301 * Return:
2302 *      0 on success, -errno on failure
2303 */
2304static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2305{
2306        struct cxlflash_cfg *cfg = afu->parent;
2307        struct device *dev = &cfg->dev->dev;
2308        struct afu_cmd *cmd = NULL;
2309        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2310        ulong lock_flags;
2311        char *buf = NULL;
2312        int rc = 0;
2313        int nretry = 0;
2314
2315        if (cfg->state != STATE_NORMAL) {
2316                dev_dbg(dev, "%s: Command not required, state=%u\n",
2317                        __func__, cfg->state);
2318                return 0;
2319        }
2320
2321        mutex_lock(&afu->sync_active);
2322        atomic_inc(&afu->cmds_active);
2323        buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2324        if (unlikely(!buf)) {
2325                dev_err(dev, "%s: no memory for command\n", __func__);
2326                rc = -ENOMEM;
2327                goto out;
2328        }
2329
2330        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2331
2332retry:
2333        memset(cmd, 0, sizeof(*cmd));
2334        memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2335        INIT_LIST_HEAD(&cmd->queue);
2336        init_completion(&cmd->cevent);
2337        cmd->parent = afu;
2338        cmd->hwq_index = hwq->index;
2339        cmd->rcb.ctx_id = hwq->ctx_hndl;
2340
2341        dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2342                __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2343
2344        rc = afu->send_cmd(afu, cmd);
2345        if (unlikely(rc)) {
2346                rc = -ENOBUFS;
2347                goto out;
2348        }
2349
2350        rc = wait_resp(afu, cmd);
2351        switch (rc) {
2352        case -ETIMEDOUT:
2353                rc = afu->context_reset(hwq);
2354                if (rc) {
2355                        /* Delete the command from pending_cmds list */
2356                        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2357                        list_del(&cmd->list);
2358                        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2359
2360                        cxlflash_schedule_async_reset(cfg);
2361                        break;
2362                }
2363                /* fall through - to retry */
2364        case -EAGAIN:
2365                if (++nretry < 2)
2366                        goto retry;
2367                /* fall through - to exit */
2368        default:
2369                break;
2370        }
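        /*
         * Retry policy recap: a timeout first attempts a context reset;
         * on success it falls through to the -EAGAIN path for at most
         * one retry, while a failed reset deletes the command from the
         * pending list and schedules a full adapter reset instead.
         */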
2371
2372        if (rcb->ioasa)
2373                *rcb->ioasa = cmd->sa;
2374out:
2375        atomic_dec(&afu->cmds_active);
2376        mutex_unlock(&afu->sync_active);
2377        kfree(buf);
2378        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2379        return rc;
2380}
2381
2382/**
2383 * cxlflash_afu_sync() - builds and sends an AFU sync command
2384 * @afu:        AFU associated with the host.
2385 * @ctx:        Identifies context requesting sync.
2386 * @res:        Identifies resource requesting sync.
2387 * @mode:       Type of sync to issue (lightweight, heavyweight, global).
2388 *
2389 * AFU sync operations are only necessary and allowed when the device is
2390 * operating normally. When not operating normally, sync requests can occur as
2391 * part of cleaning up resources associated with an adapter prior to removal.
2392 * In this scenario, these requests are simply ignored (safe due to the AFU
2393 * going away).
2394 *
2395 * Return:
2396 *      0 on success, -errno on failure
2397 */
2398int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2399{
2400        struct cxlflash_cfg *cfg = afu->parent;
2401        struct device *dev = &cfg->dev->dev;
2402        struct sisl_ioarcb rcb = { 0 };
2403
2404        dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2405                __func__, afu, ctx, res, mode);
2406
2407        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2408        rcb.msi = SISL_MSI_RRQ_UPDATED;
2409        rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2410
2411        rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2412        rcb.cdb[1] = mode;
2413        put_unaligned_be16(ctx, &rcb.cdb[2]);
2414        put_unaligned_be32(res, &rcb.cdb[4]);
2415
2416        return send_afu_cmd(afu, &rcb);
2417}
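
/*
 * For reference, the AFU sync CDB built above is laid out as:
 *
 *   cdb[0]   SISL_AFU_CMD_SYNC opcode
 *   cdb[1]   sync mode (lightweight, heavyweight, global)
 *   cdb[2-3] context handle, big endian
 *   cdb[4-7] resource handle, big endian
 */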
2418
2419/**
2420 * cxlflash_eh_abort_handler() - abort a SCSI command
2421 * @scp:        SCSI command to abort.
2422 *
2423 * CXL Flash devices do not support a single command abort. Reset the context
2424 * as per SISLite specification. Flush any pending commands in the hardware
2425 * queue before the reset.
2426 *
2427 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2428 */
2429static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2430{
2431        int rc = FAILED;
2432        struct Scsi_Host *host = scp->device->host;
2433        struct cxlflash_cfg *cfg = shost_priv(host);
2434        struct afu_cmd *cmd = sc_to_afuc(scp);
2435        struct device *dev = &cfg->dev->dev;
2436        struct afu *afu = cfg->afu;
2437        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2438
2439        dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2440                "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2441                scp->device->channel, scp->device->id, scp->device->lun,
2442                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2443                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2444                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2445                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2446
2447        /*
2448         * When the state is not normal, another reset/reload is in
2449         * progress. Return FAILED and the mid-layer will invoke the
         * host reset handler.
         */
2450        if (cfg->state != STATE_NORMAL) {
2451                dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2452                        __func__, cfg->state);
2453                goto out;
2454        }
2455
2456        rc = afu->context_reset(hwq);
2457        if (unlikely(rc))
2458                goto out;
2459
2460        rc = SUCCESS;
2461
2462out:
2463        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2464        return rc;
2465}
2466
2467/**
2468 * cxlflash_eh_device_reset_handler() - reset a single LUN
2469 * @scp:        SCSI command to send.
2470 *
2471 * Return:
2472 *      SUCCESS as defined in scsi/scsi.h
2473 *      FAILED as defined in scsi/scsi.h
2474 */
2475static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2476{
2477        int rc = SUCCESS;
2478        struct scsi_device *sdev = scp->device;
2479        struct Scsi_Host *host = sdev->host;
2480        struct cxlflash_cfg *cfg = shost_priv(host);
2481        struct device *dev = &cfg->dev->dev;
2482        int rcr = 0;
2483
2484        dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2485                host->host_no, sdev->channel, sdev->id, sdev->lun);
2486retry:
2487        switch (cfg->state) {
2488        case STATE_NORMAL:
2489                rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2490                if (unlikely(rcr))
2491                        rc = FAILED;
2492                break;
2493        case STATE_RESET:
2494                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2495                goto retry;
2496        default:
2497                rc = FAILED;
2498                break;
2499        }
2500
2501        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2502        return rc;
2503}
2504
2505/**
2506 * cxlflash_eh_host_reset_handler() - reset the host adapter
2507 * @scp:        SCSI command from stack identifying host.
2508 *
2509 * Following a reset, the state is evaluated again in case an EEH occurred
2510 * during the reset. In such a scenario, the host reset will either yield
2511 * until the EEH recovery is complete or return success or failure based
2512 * upon the current device state.
2513 *
2514 * Return:
2515 *      SUCCESS as defined in scsi/scsi.h
2516 *      FAILED as defined in scsi/scsi.h
2517 */
2518static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2519{
2520        int rc = SUCCESS;
2521        int rcr = 0;
2522        struct Scsi_Host *host = scp->device->host;
2523        struct cxlflash_cfg *cfg = shost_priv(host);
2524        struct device *dev = &cfg->dev->dev;
2525
2526        dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2527
2528        switch (cfg->state) {
2529        case STATE_NORMAL:
2530                cfg->state = STATE_RESET;
2531                drain_ioctls(cfg);
2532                cxlflash_mark_contexts_error(cfg);
2533                rcr = afu_reset(cfg);
2534                if (rcr) {
2535                        rc = FAILED;
2536                        cfg->state = STATE_FAILTERM;
2537                } else
2538                        cfg->state = STATE_NORMAL;
2539                wake_up_all(&cfg->reset_waitq);
2540                ssleep(1);
2541                /* fall through */
2542        case STATE_RESET:
2543                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2544                if (cfg->state == STATE_NORMAL)
2545                        break;
2546                /* fall through */
2547        default:
2548                rc = FAILED;
2549                break;
2550        }
2551
2552        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2553        return rc;
2554}
2555
2556/**
2557 * cxlflash_change_queue_depth() - change the queue depth for the device
2558 * @sdev:       SCSI device destined for queue depth change.
2559 * @qdepth:     Requested queue depth value to set.
2560 *
2561 * The requested queue depth is capped to the maximum supported value.
2562 *
2563 * Return: The actual queue depth set.
2564 */
2565static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2566{
2568        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2569                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2570
2571        scsi_change_queue_depth(sdev, qdepth);
2572        return sdev->queue_depth;
2573}
2574
2575/**
2576 * cxlflash_show_port_status() - queries and presents the current port status
2577 * @port:       Desired port for status reporting.
2578 * @cfg:        Internal structure associated with the host.
2579 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2580 *
2581 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2582 */
2583static ssize_t cxlflash_show_port_status(u32 port,
2584                                         struct cxlflash_cfg *cfg,
2585                                         char *buf)
2586{
2587        struct device *dev = &cfg->dev->dev;
2588        char *disp_status;
2589        u64 status;
2590        __be64 __iomem *fc_port_regs;
2591
2592        WARN_ON(port >= MAX_FC_PORTS);
2593
2594        if (port >= cfg->num_fc_ports) {
2595                dev_info(dev, "%s: Port %d not supported on this card.\n",
2596                        __func__, port);
2597                return -EINVAL;
2598        }
2599
2600        fc_port_regs = get_fc_port_regs(cfg, port);
2601        status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2602        status &= FC_MTIP_STATUS_MASK;
2603
2604        if (status == FC_MTIP_STATUS_ONLINE)
2605                disp_status = "online";
2606        else if (status == FC_MTIP_STATUS_OFFLINE)
2607                disp_status = "offline";
2608        else
2609                disp_status = "unknown";
2610
2611        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2612}
2613
2614/**
2615 * port0_show() - queries and presents the current status of port 0
2616 * @dev:        Generic device associated with the host owning the port.
2617 * @attr:       Device attribute representing the port.
2618 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2619 *
2620 * Return: The size of the ASCII string returned in @buf.
2621 */
2622static ssize_t port0_show(struct device *dev,
2623                          struct device_attribute *attr,
2624                          char *buf)
2625{
2626        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2627
2628        return cxlflash_show_port_status(0, cfg, buf);
2629}
2630
2631/**
2632 * port1_show() - queries and presents the current status of port 1
2633 * @dev:        Generic device associated with the host owning the port.
2634 * @attr:       Device attribute representing the port.
2635 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2636 *
2637 * Return: The size of the ASCII string returned in @buf.
2638 */
2639static ssize_t port1_show(struct device *dev,
2640                          struct device_attribute *attr,
2641                          char *buf)
2642{
2643        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2644
2645        return cxlflash_show_port_status(1, cfg, buf);
2646}
2647
2648/**
2649 * port2_show() - queries and presents the current status of port 2
2650 * @dev:        Generic device associated with the host owning the port.
2651 * @attr:       Device attribute representing the port.
2652 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2653 *
2654 * Return: The size of the ASCII string returned in @buf.
2655 */
2656static ssize_t port2_show(struct device *dev,
2657                          struct device_attribute *attr,
2658                          char *buf)
2659{
2660        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2661
2662        return cxlflash_show_port_status(2, cfg, buf);
2663}
2664
2665/**
2666 * port3_show() - queries and presents the current status of port 3
2667 * @dev:        Generic device associated with the host owning the port.
2668 * @attr:       Device attribute representing the port.
2669 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2670 *
2671 * Return: The size of the ASCII string returned in @buf.
2672 */
2673static ssize_t port3_show(struct device *dev,
2674                          struct device_attribute *attr,
2675                          char *buf)
2676{
2677        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2678
2679        return cxlflash_show_port_status(3, cfg, buf);
2680}
2681
2682/**
2683 * lun_mode_show() - presents the current LUN mode of the host
2684 * @dev:        Generic device associated with the host.
2685 * @attr:       Device attribute representing the LUN mode.
2686 * @buf:        Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2687 *
2688 * Return: The size of the ASCII string returned in @buf.
2689 */
2690static ssize_t lun_mode_show(struct device *dev,
2691                             struct device_attribute *attr, char *buf)
2692{
2693        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2694        struct afu *afu = cfg->afu;
2695
2696        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2697}
2698
2699/**
2700 * lun_mode_store() - sets the LUN mode of the host
2701 * @dev:        Generic device associated with the host.
2702 * @attr:       Device attribute representing the LUN mode.
2703 * @buf:        Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2704 * @count:      Length of data residing in @buf.
2705 *
2706 * The CXL Flash AFU supports a dummy LUN mode where the external
2707 * links and storage are not required. Space on the FPGA is used
2708 * to create 1 or 2 small LUNs which are presented to the system
2709 * as if they were a normal storage device. This feature is useful
2710 * during development and also provides manufacturing with a way
2711 * to test the AFU without an actual device.
2712 *
2713 * 0 = external LUN[s] (default)
2714 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2715 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2716 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2717 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2718 *
2719 * Return: The number of bytes consumed from @buf (i.e. @count).
2720 */
2721static ssize_t lun_mode_store(struct device *dev,
2722                              struct device_attribute *attr,
2723                              const char *buf, size_t count)
2724{
2725        struct Scsi_Host *shost = class_to_shost(dev);
2726        struct cxlflash_cfg *cfg = shost_priv(shost);
2727        struct afu *afu = cfg->afu;
2728        int rc;
2729        u32 lun_mode;
2730
2731        rc = kstrtouint(buf, 10, &lun_mode);
2732        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2733                afu->internal_lun = lun_mode;
2734
2735                /*
2736                 * When configured for internal LUN, there is only one channel,
2737                 * channel number 0, else there will be one less than the number
2738                 * of fc ports for this card.
2739                 */
2740                if (afu->internal_lun)
2741                        shost->max_channel = 0;
2742                else
2743                        shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2744
2745                afu_reset(cfg);
2746                scsi_scan_host(cfg->host);
2747        }
2748
2749        return count;
2750}
2751
2752/**
2753 * ioctl_version_show() - presents the current ioctl version of the host
2754 * @dev:        Generic device associated with the host.
2755 * @attr:       Device attribute representing the ioctl version.
2756 * @buf:        Buffer of length PAGE_SIZE to report back the ioctl version.
2757 *
2758 * Return: The size of the ASCII string returned in @buf.
2759 */
2760static ssize_t ioctl_version_show(struct device *dev,
2761                                  struct device_attribute *attr, char *buf)
2762{
2763        ssize_t bytes = 0;
2764
2765        bytes = scnprintf(buf, PAGE_SIZE,
2766                          "disk: %u\n", DK_CXLFLASH_VERSION_0);
2767        bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2768                           "host: %u\n", HT_CXLFLASH_VERSION_0);
2769
2770        return bytes;
2771}
2772
2773/**
2774 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2775 * @port:       Desired port for status reporting.
2776 * @cfg:        Internal structure associated with the host.
2777 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2778 *
2779 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2780 */
2781static ssize_t cxlflash_show_port_lun_table(u32 port,
2782                                            struct cxlflash_cfg *cfg,
2783                                            char *buf)
2784{
2785        struct device *dev = &cfg->dev->dev;
2786        __be64 __iomem *fc_port_luns;
2787        int i;
2788        ssize_t bytes = 0;
2789
2790        WARN_ON(port >= MAX_FC_PORTS);
2791
2792        if (port >= cfg->num_fc_ports) {
2793                dev_info(dev, "%s: Port %d not supported on this card.\n",
2794                        __func__, port);
2795                return -EINVAL;
2796        }
2797
2798        fc_port_luns = get_fc_port_luns(cfg, port);
2799
2800        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2801                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2802                                   "%03d: %016llx\n",
2803                                   i, readq_be(&fc_port_luns[i]));
2804        return bytes;
2805}
2806
2807/**
2808 * port0_lun_table_show() - presents the current LUN table of port 0
2809 * @dev:        Generic device associated with the host owning the port.
2810 * @attr:       Device attribute representing the port.
2811 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2812 *
2813 * Return: The size of the ASCII string returned in @buf.
2814 */
2815static ssize_t port0_lun_table_show(struct device *dev,
2816                                    struct device_attribute *attr,
2817                                    char *buf)
2818{
2819        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2820
2821        return cxlflash_show_port_lun_table(0, cfg, buf);
2822}
2823
2824/**
2825 * port1_lun_table_show() - presents the current LUN table of port 1
2826 * @dev:        Generic device associated with the host owning the port.
2827 * @attr:       Device attribute representing the port.
2828 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2829 *
2830 * Return: The size of the ASCII string returned in @buf.
2831 */
2832static ssize_t port1_lun_table_show(struct device *dev,
2833                                    struct device_attribute *attr,
2834                                    char *buf)
2835{
2836        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2837
2838        return cxlflash_show_port_lun_table(1, cfg, buf);
2839}
2840
2841/**
2842 * port2_lun_table_show() - presents the current LUN table of port 2
2843 * @dev:        Generic device associated with the host owning the port.
2844 * @attr:       Device attribute representing the port.
2845 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2846 *
2847 * Return: The size of the ASCII string returned in @buf.
2848 */
2849static ssize_t port2_lun_table_show(struct device *dev,
2850                                    struct device_attribute *attr,
2851                                    char *buf)
2852{
2853        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2854
2855        return cxlflash_show_port_lun_table(2, cfg, buf);
2856}
2857
2858/**
2859 * port3_lun_table_show() - presents the current LUN table of port 3
2860 * @dev:        Generic device associated with the host owning the port.
2861 * @attr:       Device attribute representing the port.
2862 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2863 *
2864 * Return: The size of the ASCII string returned in @buf.
2865 */
2866static ssize_t port3_lun_table_show(struct device *dev,
2867                                    struct device_attribute *attr,
2868                                    char *buf)
2869{
2870        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2871
2872        return cxlflash_show_port_lun_table(3, cfg, buf);
2873}
2874
2875/**
2876 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2877 * @dev:        Generic device associated with the host.
2878 * @attr:       Device attribute representing the IRQ poll weight.
2879 * @buf:        Buffer of length PAGE_SIZE to report back the current IRQ poll
2880 *              weight in ASCII.
2881 *
2882 * An IRQ poll weight of 0 indicates polling is disabled.
2883 *
2884 * Return: The size of the ASCII string returned in @buf.
2885 */
2886static ssize_t irqpoll_weight_show(struct device *dev,
2887                                   struct device_attribute *attr, char *buf)
2888{
2889        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2890        struct afu *afu = cfg->afu;
2891
2892        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2893}
2894
2895/**
2896 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2897 * @dev:        Generic device associated with the host.
2898 * @attr:       Device attribute representing the IRQ poll weight.
2899 * @buf:        Buffer of length PAGE_SIZE containing the desired IRQ poll
2900 *              weight in ASCII.
2901 * @count:      Length of data residing in @buf.
2902 *
2903 * An IRQ poll weight of 0 indicates polling is disabled.
2904 *
2905 * Return: @count on success, -EINVAL on failure.
2906 */
2907static ssize_t irqpoll_weight_store(struct device *dev,
2908                                    struct device_attribute *attr,
2909                                    const char *buf, size_t count)
2910{
2911        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2912        struct device *cfgdev = &cfg->dev->dev;
2913        struct afu *afu = cfg->afu;
2914        struct hwq *hwq;
2915        u32 weight;
2916        int rc, i;
2917
2918        rc = kstrtouint(buf, 10, &weight);
2919        if (rc)
2920                return -EINVAL;
2921
2922        if (weight > 256) {
2923                dev_info(cfgdev,
2924                         "Invalid IRQ poll weight. It must be 256 or less.\n");
2925                return -EINVAL;
2926        }
2927
2928        if (weight == afu->irqpoll_weight) {
2929                dev_info(cfgdev,
2930                         "Requested IRQ poll weight matches the current weight.\n");
2931                return -EINVAL;
2932        }
2933
2934        if (afu_is_irqpoll_enabled(afu)) {
2935                for (i = 0; i < afu->num_hwqs; i++) {
2936                        hwq = get_hwq(afu, i);
2937
2938                        irq_poll_disable(&hwq->irqpoll);
2939                }
2940        }
2941
2942        afu->irqpoll_weight = weight;
2943
2944        if (weight > 0) {
2945                for (i = 0; i < afu->num_hwqs; i++) {
2946                        hwq = get_hwq(afu, i);
2947
2948                        irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2949                }
2950        }
2951
2952        return count;
2953}
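
    /*
     * Illustrative usage sketch (hypothetical host number; error handling
     * elided): tuning interrupt polling from userspace. A value of 0
     * disables polling; anything up to 256 is accepted by the store above.
     *
     *   int fd = open("/sys/class/scsi_host/host0/irqpoll_weight", O_WRONLY);
     *
     *   write(fd, "64", 2);           // poll up to 64 completions per cycle
     *   close(fd);
     */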
2954
2955/**
2956 * num_hwqs_show() - presents the number of hardware queues for the host
2957 * @dev:        Generic device associated with the host.
2958 * @attr:       Device attribute representing the number of hardware queues.
2959 * @buf:        Buffer of length PAGE_SIZE to report back the number of hardware
2960 *              queues in ASCII.
2961 *
2962 * Return: The size of the ASCII string returned in @buf.
2963 */
2964static ssize_t num_hwqs_show(struct device *dev,
2965                             struct device_attribute *attr, char *buf)
2966{
2967        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2968        struct afu *afu = cfg->afu;
2969
2970        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2971}
2972
2973/**
2974 * num_hwqs_store() - sets the number of hardware queues for the host
2975 * @dev:        Generic device associated with the host.
2976 * @attr:       Device attribute representing the number of hardware queues.
2977 * @buf:        Buffer of length PAGE_SIZE containing the number of hardware
2978 *              queues in ASCII.
2979 * @count:      Length of data residing in @buf.
2980 *
2981 * n > 0: num_hwqs = n
2982 * n = 0: num_hwqs = num_online_cpus()
2983 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2984 *
2985 * Return: @count on success, -errno on failure.
2986 */
2987static ssize_t num_hwqs_store(struct device *dev,
2988                              struct device_attribute *attr,
2989                              const char *buf, size_t count)
2990{
2991        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2992        struct afu *afu = cfg->afu;
2993        int rc;
2994        int nhwqs, num_hwqs;
2995
2996        rc = kstrtoint(buf, 10, &nhwqs);
2997        if (rc)
2998                return -EINVAL;
2999
3000        if (nhwqs >= 1)
3001                num_hwqs = nhwqs;
3002        else if (nhwqs == 0)
3003                num_hwqs = num_online_cpus();
3004        else
3005                num_hwqs = num_online_cpus() / abs(nhwqs);
3006
3007        afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3008        WARN_ON_ONCE(afu->desired_hwqs == 0);
3009
3010retry:
3011        switch (cfg->state) {
3012        case STATE_NORMAL:
3013                cfg->state = STATE_RESET;
3014                drain_ioctls(cfg);
3015                cxlflash_mark_contexts_error(cfg);
3016                rc = afu_reset(cfg);
3017                if (rc)
3018                        cfg->state = STATE_FAILTERM;
3019                else
3020                        cfg->state = STATE_NORMAL;
3021                wake_up_all(&cfg->reset_waitq);
3022                break;
3023        case STATE_RESET:
3024                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3025                if (cfg->state == STATE_NORMAL)
3026                        goto retry;
3027                /* else, fall through */
3028        default:
3029                /* Ideally should not happen */
3030                dev_err(dev, "%s: Device is not ready, state=%d\n",
3031                        __func__, cfg->state);
3032                break;
3033        }
3034
3035        return count;
3036}
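
    /*
     * Worked example of the mapping documented above, assuming 16 online
     * CPUs and CXLFLASH_MAX_HWQS >= 16 (the result is always capped at
     * CXLFLASH_MAX_HWQS and warned on if it computes to 0):
     *
     *   n = 4   ->  num_hwqs = 4
     *   n = 0   ->  num_hwqs = 16   (one per online CPU)
     *   n = -2  ->  num_hwqs = 8    (num_online_cpus() / 2)
     */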
3037
3038static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3039
3040/**
3041 * hwq_mode_show() - presents the HWQ steering mode for the host
3042 * @dev:        Generic device associated with the host.
3043 * @attr:       Device attribute representing the HWQ steering mode.
3044 * @buf:        Buffer of length PAGE_SIZE to report back the HWQ steering mode
3045 *              as a character string.
3046 *
3047 * Return: The size of the ASCII string returned in @buf.
3048 */
3049static ssize_t hwq_mode_show(struct device *dev,
3050                             struct device_attribute *attr, char *buf)
3051{
3052        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3053        struct afu *afu = cfg->afu;
3054
3055        return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3056}
3057
3058/**
3059 * hwq_mode_store() - sets the HWQ steering mode for the host
3060 * @dev:        Generic device associated with the host.
3061 * @attr:       Device attribute representing the HWQ steering mode.
3062 * @buf:        Buffer of length PAGE_SIZE containing the HWQ steering mode
3063 *              as a character string.
3064 * @count:      Length of data residing in @buf.
3065 *
3066 * rr = Round-Robin
3067 * tag = Block MQ Tagging
3068 * cpu = CPU Affinity
3069 *
3070 * Return: @count on success, -errno on failure.
3071 */
3072static ssize_t hwq_mode_store(struct device *dev,
3073                              struct device_attribute *attr,
3074                              const char *buf, size_t count)
3075{
3076        struct Scsi_Host *shost = class_to_shost(dev);
3077        struct cxlflash_cfg *cfg = shost_priv(shost);
3078        struct device *cfgdev = &cfg->dev->dev;
3079        struct afu *afu = cfg->afu;
3080        int i;
3081        u32 mode = MAX_HWQ_MODE;
3082
3083        for (i = 0; i < MAX_HWQ_MODE; i++) {
3084                if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3085                        mode = i;
3086                        break;
3087                }
3088        }
3089
3090        if (mode >= MAX_HWQ_MODE) {
3091                dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3092                return -EINVAL;
3093        }
3094
3095        afu->hwq_mode = mode;
3096
3097        return count;
3098}
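
    /*
     * Illustrative usage sketch (hypothetical host number; error handling
     * elided): selecting the CPU affinity steering mode from userspace.
     *
     *   int fd = open("/sys/class/scsi_host/host0/hwq_mode", O_WRONLY);
     *
     *   write(fd, "cpu", 3);          // or "rr" / "tag"
     *   close(fd);
     */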
3099
3100/**
3101 * mode_show() - presents the current mode of the device
3102 * @dev:        Generic device associated with the device.
3103 * @attr:       Device attribute representing the device mode.
3104 * @buf:        Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3105 *
3106 * Return: The size of the ASCII string returned in @buf.
3107 */
3108static ssize_t mode_show(struct device *dev,
3109                         struct device_attribute *attr, char *buf)
3110{
3111        struct scsi_device *sdev = to_scsi_device(dev);
3112
3113        return scnprintf(buf, PAGE_SIZE, "%s\n",
3114                         sdev->hostdata ? "superpipe" : "legacy");
3115}
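
    /*
     * Illustrative sketch (the device address 1:0:0:0 is a placeholder and
     * the path assumes the standard SCSI sysfs layout): the attribute reads
     * "superpipe" when sdev->hostdata is set and "legacy" otherwise.
     *
     *   char mode[16] = { 0 };
     *   int fd = open("/sys/class/scsi_device/1:0:0:0/device/mode", O_RDONLY);
     *
     *   read(fd, mode, sizeof(mode) - 1);
     *   close(fd);
     */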
3116
3117/*
3118 * Host attributes
3119 */
3120static DEVICE_ATTR_RO(port0);
3121static DEVICE_ATTR_RO(port1);
3122static DEVICE_ATTR_RO(port2);
3123static DEVICE_ATTR_RO(port3);
3124static DEVICE_ATTR_RW(lun_mode);
3125static DEVICE_ATTR_RO(ioctl_version);
3126static DEVICE_ATTR_RO(port0_lun_table);
3127static DEVICE_ATTR_RO(port1_lun_table);
3128static DEVICE_ATTR_RO(port2_lun_table);
3129static DEVICE_ATTR_RO(port3_lun_table);
3130static DEVICE_ATTR_RW(irqpoll_weight);
3131static DEVICE_ATTR_RW(num_hwqs);
3132static DEVICE_ATTR_RW(hwq_mode);
3133
3134static struct device_attribute *cxlflash_host_attrs[] = {
3135        &dev_attr_port0,
3136        &dev_attr_port1,
3137        &dev_attr_port2,
3138        &dev_attr_port3,
3139        &dev_attr_lun_mode,
3140        &dev_attr_ioctl_version,
3141        &dev_attr_port0_lun_table,
3142        &dev_attr_port1_lun_table,
3143        &dev_attr_port2_lun_table,
3144        &dev_attr_port3_lun_table,
3145        &dev_attr_irqpoll_weight,
3146        &dev_attr_num_hwqs,
3147        &dev_attr_hwq_mode,
3148        NULL
3149};
3150
3151/*
3152 * Device attributes
3153 */
3154static DEVICE_ATTR_RO(mode);
3155
3156static struct device_attribute *cxlflash_dev_attrs[] = {
3157        &dev_attr_mode,
3158        NULL
3159};
3160
3161/*
3162 * Host template
3163 */
3164static struct scsi_host_template driver_template = {
3165        .module = THIS_MODULE,
3166        .name = CXLFLASH_ADAPTER_NAME,
3167        .info = cxlflash_driver_info,
3168        .ioctl = cxlflash_ioctl,
3169        .proc_name = CXLFLASH_NAME,
3170        .queuecommand = cxlflash_queuecommand,
3171        .eh_abort_handler = cxlflash_eh_abort_handler,
3172        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3173        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3174        .change_queue_depth = cxlflash_change_queue_depth,
3175        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3176        .can_queue = CXLFLASH_MAX_CMDS,
3177        .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3178        .this_id = -1,
3179        .sg_tablesize = 1,      /* No scatter gather support */
3180        .max_sectors = CXLFLASH_MAX_SECTORS,
3181        .shost_attrs = cxlflash_host_attrs,
3182        .sdev_attrs = cxlflash_dev_attrs,
3183};
3184
3185/*
3186 * Device dependent values
3187 */
3188static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3189                                        CXLFLASH_WWPN_VPD_REQUIRED };
3190static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3191                                        CXLFLASH_NOTIFY_SHUTDOWN };
3192static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3193                                        (CXLFLASH_NOTIFY_SHUTDOWN |
3194                                        CXLFLASH_OCXL_DEV) };
3195
3196/*
3197 * PCI device binding table
3198 */
3199static struct pci_device_id cxlflash_pci_table[] = {
3200        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3201         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3202        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3203         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3204        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3205         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3206        {}
3207};
3208
3209MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3210
3211/**
3212 * cxlflash_worker_thread() - work thread handler for the AFU
3213 * @work:       Work structure contained within cxlflash associated with host.
3214 *
3215 * Handles the following events:
3216 * - Link reset, which cannot be performed in interrupt context because it
3217 *   can block for up to a few seconds
3218 * - Rescan the host
3219 */
3220static void cxlflash_worker_thread(struct work_struct *work)
3221{
3222        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3223                                                work_q);
3224        struct afu *afu = cfg->afu;
3225        struct device *dev = &cfg->dev->dev;
3226        __be64 __iomem *fc_port_regs;
3227        int port;
3228        ulong lock_flags;
3229
3230        /* Avoid MMIO if the device has failed */
3231
3232        if (cfg->state != STATE_NORMAL)
3233                return;
3234
3235        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3236
3237        if (cfg->lr_state == LINK_RESET_REQUIRED) {
3238                port = cfg->lr_port;
3239                if (port < 0)
3240                        dev_err(dev, "%s: invalid port index %d\n",
3241                                __func__, port);
3242                else {
3243                        spin_unlock_irqrestore(cfg->host->host_lock,
3244                                               lock_flags);
3245
3246                        /* The reset can block... */
3247                        fc_port_regs = get_fc_port_regs(cfg, port);
3248                        afu_link_reset(afu, port, fc_port_regs);
3249                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3250                }
3251
3252                cfg->lr_state = LINK_RESET_COMPLETE;
3253        }
3254
3255        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3256
3257        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3258                scsi_scan_host(cfg->host);
3259}
3260
3261/**
3262 * cxlflash_chr_open() - character device open handler
3263 * @inode:      Device inode associated with this character device.
3264 * @file:       File pointer for this device.
3265 *
3266 * Only users with admin privileges are allowed to open the character device.
3267 *
3268 * Return: 0 on success, -errno on failure
3269 */
3270static int cxlflash_chr_open(struct inode *inode, struct file *file)
3271{
3272        struct cxlflash_cfg *cfg;
3273
3274        if (!capable(CAP_SYS_ADMIN))
3275                return -EACCES;
3276
3277        cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3278        file->private_data = cfg;
3279
3280        return 0;
3281}
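
    /*
     * Illustrative sketch: the node is created by init_chrdev() below and,
     * through cxlflash_devnode(), surfaces as /dev/cxlflash/cxlflash<minor>.
     * Only a CAP_SYS_ADMIN caller may open it:
     *
     *   int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
     *
     *   if (fd < 0 && errno == EACCES)
     *           err(1, "need admin privileges");
     */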
3282
3283/**
3284 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3285 * @cmd:        The host ioctl command to decode.
3286 *
3287 * Return: A string identifying the decoded host ioctl.
3288 */
3289static char *decode_hioctl(unsigned int cmd)
3290{
3291        switch (cmd) {
3292        case HT_CXLFLASH_LUN_PROVISION:
3293                return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3294        }
3295
3296        return "UNKNOWN";
3297}
3298
3299/**
3300 * cxlflash_lun_provision() - host LUN provisioning handler
3301 * @cfg:        Internal structure associated with the host.
3302 * @lunprov:    Kernel copy of userspace ioctl data structure.
3303 *
3304 * Return: 0 on success, -errno on failure
3305 */
3306static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3307                                  struct ht_cxlflash_lun_provision *lunprov)
3308{
3309        struct afu *afu = cfg->afu;
3310        struct device *dev = &cfg->dev->dev;
3311        struct sisl_ioarcb rcb;
3312        struct sisl_ioasa asa;
3313        __be64 __iomem *fc_port_regs;
3314        u16 port = lunprov->port;
3315        u16 scmd = lunprov->hdr.subcmd;
3316        u16 type;
3317        u64 reg;
3318        u64 size;
3319        u64 lun_id;
3320        int rc = 0;
3321
3322        if (!afu_is_lun_provision(afu)) {
3323                rc = -ENOTSUPP;
3324                goto out;
3325        }
3326
3327        if (port >= cfg->num_fc_ports) {
3328                rc = -EINVAL;
3329                goto out;
3330        }
3331
3332        switch (scmd) {
3333        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3334                type = SISL_AFU_LUN_PROVISION_CREATE;
3335                size = lunprov->size;
3336                lun_id = 0;
3337                break;
3338        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3339                type = SISL_AFU_LUN_PROVISION_DELETE;
3340                size = 0;
3341                lun_id = lunprov->lun_id;
3342                break;
3343        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3344                fc_port_regs = get_fc_port_regs(cfg, port);
3345
3346                reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3347                lunprov->max_num_luns = reg;
3348                reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3349                lunprov->cur_num_luns = reg;
3350                reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3351                lunprov->max_cap_port = reg;
3352                reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3353                lunprov->cur_cap_port = reg;
3354
3355                goto out;
3356        default:
3357                rc = -EINVAL;
3358                goto out;
3359        }
3360
3361        memset(&rcb, 0, sizeof(rcb));
3362        memset(&asa, 0, sizeof(asa));
3363        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3364        rcb.lun_id = lun_id;
3365        rcb.msi = SISL_MSI_RRQ_UPDATED;
3366        rcb.timeout = MC_LUN_PROV_TIMEOUT;
3367        rcb.ioasa = &asa;
3368
3369        rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3370        rcb.cdb[1] = type;
3371        rcb.cdb[2] = port;
3372        put_unaligned_be64(size, &rcb.cdb[8]);
3373
3374        rc = send_afu_cmd(afu, &rcb);
3375        if (rc) {
3376                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3377                        __func__, rc, asa.ioasc, asa.afu_extra);
3378                goto out;
3379        }
3380
3381        if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3382                lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3383                memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3384        }
3385out:
3386        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3387        return rc;
3388}
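
    /*
     * Illustrative userspace sketch (fd is an open cxlflash character
     * device; LUN size units are device-specific; error handling elided):
     * creating a LUN on port 0. Field names follow the checks performed in
     * this function and in cxlflash_chr_ioctl() below.
     *
     *   struct ht_cxlflash_lun_provision lp = { 0 };
     *
     *   lp.hdr.version = HT_CXLFLASH_VERSION_0;
     *   lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
     *   lp.port = 0;
     *   lp.size = size;               // desired capacity
     *   if (!ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp))
     *           printf("lun_id=%llx\n", lp.lun_id);
     */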
3389
3390/**
3391 * cxlflash_afu_debug() - host AFU debug handler
3392 * @cfg:        Internal structure associated with the host.
3393 * @afu_dbg:    Kernel copy of userspace ioctl data structure.
3394 *
3395 * For debug requests requiring a data buffer, always provide an aligned
3396 * (cache line) buffer to the AFU to appease any alignment requirements.
3397 *
3398 * Return: 0 on success, -errno on failure
3399 */
3400static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3401                              struct ht_cxlflash_afu_debug *afu_dbg)
3402{
3403        struct afu *afu = cfg->afu;
3404        struct device *dev = &cfg->dev->dev;
3405        struct sisl_ioarcb rcb;
3406        struct sisl_ioasa asa;
3407        char *buf = NULL;
3408        char *kbuf = NULL;
3409        void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3410        u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3411        u32 ulen = afu_dbg->data_len;
3412        bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3413        int rc = 0;
3414
3415        if (!afu_is_afu_debug(afu)) {
3416                rc = -ENOTSUPP;
3417                goto out;
3418        }
3419
3420        if (ulen) {
3421                req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3422
3423                if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3424                        rc = -EINVAL;
3425                        goto out;
3426                }
3427
3428                buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3429                if (unlikely(!buf)) {
3430                        rc = -ENOMEM;
3431                        goto out;
3432                }
3433
3434                kbuf = PTR_ALIGN(buf, cache_line_size());
3435
3436                if (is_write) {
3437                        req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3438
3439                        if (copy_from_user(kbuf, ubuf, ulen)) {
3440                                rc = -EFAULT;
3441                                goto out;
3442                        }
3443                }
3444        }
3445
3446        memset(&rcb, 0, sizeof(rcb));
3447        memset(&asa, 0, sizeof(asa));
3448
3449        rcb.req_flags = req_flags;
3450        rcb.msi = SISL_MSI_RRQ_UPDATED;
3451        rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3452        rcb.ioasa = &asa;
3453
3454        if (ulen) {
3455                rcb.data_len = ulen;
3456                rcb.data_ea = (uintptr_t)kbuf;
3457        }
3458
3459        rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3460        memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3461               HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3462
3463        rc = send_afu_cmd(afu, &rcb);
3464        if (rc) {
3465                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3466                        __func__, rc, asa.ioasc, asa.afu_extra);
3467                goto out;
3468        }
3469
3470        if (ulen && !is_write) {
3471                if (copy_to_user(ubuf, kbuf, ulen))
3472                        rc = -EFAULT;
3473        }
3474out:
3475        kfree(buf);
3476        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3477        return rc;
3478}
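
    /*
     * Illustrative userspace sketch (the subcommand bytes are AFU-specific
     * placeholders; error handling elided): fetching debug data from the
     * AFU. Leaving HT_CXLFLASH_HOST_WRITE clear in hdr.flags makes this a
     * read, so the kernel copies the result back into the buffer.
     *
     *   struct ht_cxlflash_afu_debug dbg = { 0 };
     *   char data[4096];
     *
     *   dbg.hdr.version = HT_CXLFLASH_VERSION_0;
     *   memcpy(dbg.afu_subcmd, subcmd, HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
     *   dbg.data_ea = (__u64)(uintptr_t)data;
     *   dbg.data_len = sizeof(data);
     *   ioctl(fd, HT_CXLFLASH_AFU_DEBUG, &dbg);
     */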
3479
3480/**
3481 * cxlflash_chr_ioctl() - character device IOCTL handler
3482 * @file:       File pointer for this device.
3483 * @cmd:        IOCTL command.
3484 * @arg:        Userspace ioctl data structure.
3485 *
3486 * A read/write semaphore is used to implement a 'drain' of currently
3487 * running ioctls. The read semaphore is taken at the beginning of each
3488 * ioctl thread and released upon concluding execution. Additionally the
3489 * semaphore should be released and then reacquired in any ioctl execution
3490 * path which will wait for an event to occur that is outside the scope of
3491 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3492 * a thread simply needs to acquire the write semaphore.
3493 *
3494 * Return: 0 on success, -errno on failure
3495 */
3496static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3497                               unsigned long arg)
3498{
3499        typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3500
3501        struct cxlflash_cfg *cfg = file->private_data;
3502        struct device *dev = &cfg->dev->dev;
3503        char buf[sizeof(union cxlflash_ht_ioctls)];
3504        void __user *uarg = (void __user *)arg;
3505        struct ht_cxlflash_hdr *hdr;
3506        size_t size = 0;
3507        bool known_ioctl = false;
3508        int idx = 0;
3509        int rc = 0;
3510        hioctl do_ioctl = NULL;
3511
3512        static const struct {
3513                size_t size;
3514                hioctl ioctl;
3515        } ioctl_tbl[] = {       /* NOTE: order matters here */
3516        { sizeof(struct ht_cxlflash_lun_provision),
3517                (hioctl)cxlflash_lun_provision },
3518        { sizeof(struct ht_cxlflash_afu_debug),
3519                (hioctl)cxlflash_afu_debug },
3520        };
3521
3522        /* Hold read semaphore so we can drain if needed */
3523        down_read(&cfg->ioctl_rwsem);
3524
3525        dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3526                __func__, cmd, idx, sizeof(ioctl_tbl));
3527
3528        switch (cmd) {
3529        case HT_CXLFLASH_LUN_PROVISION:
3530        case HT_CXLFLASH_AFU_DEBUG:
3531                known_ioctl = true;
3532                idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3533                size = ioctl_tbl[idx].size;
3534                do_ioctl = ioctl_tbl[idx].ioctl;
3535
3536                if (likely(do_ioctl))
3537                        break;
3538
3539                /* fall through */
3540        default:
3541                rc = -EINVAL;
3542                goto out;
3543        }
3544
3545        if (unlikely(copy_from_user(&buf, uarg, size))) {
3546                dev_err(dev, "%s: copy_from_user() fail "
3547                        "size=%lu cmd=%d (%s) uarg=%p\n",
3548                        __func__, size, cmd, decode_hioctl(cmd), uarg);
3549                rc = -EFAULT;
3550                goto out;
3551        }
3552
3553        hdr = (struct ht_cxlflash_hdr *)&buf;
3554        if (hdr->version != HT_CXLFLASH_VERSION_0) {
3555                dev_dbg(dev, "%s: Version %u not supported for %s\n",
3556                        __func__, hdr->version, decode_hioctl(cmd));
3557                rc = -EINVAL;
3558                goto out;
3559        }
3560
3561        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3562                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3563                rc = -EINVAL;
3564                goto out;
3565        }
3566
3567        rc = do_ioctl(cfg, (void *)&buf);
3568        if (likely(!rc))
3569                if (unlikely(copy_to_user(uarg, &buf, size))) {
3570                        dev_err(dev, "%s: copy_to_user() fail "
3571                                "size=%lu cmd=%d (%s) uarg=%p\n",
3572                                __func__, size, cmd, decode_hioctl(cmd), uarg);
3573                        rc = -EFAULT;
3574                }
3575
3576        /* fall through to exit */
3577
3578out:
3579        up_read(&cfg->ioctl_rwsem);
3580        if (unlikely(rc && known_ioctl))
3581                dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3582                        __func__, decode_hioctl(cmd), cmd, rc);
3583        else
3584                dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3585                        __func__, decode_hioctl(cmd), cmd, rc);
3586        return rc;
3587}
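
    /*
     * Minimal sketch of the drain described above: every ioctl path holds
     * the read side of ioctl_rwsem, so a thread can quiesce in-flight
     * ioctls simply by cycling the write side (this is, in effect, what
     * drain_ioctls() does elsewhere in this driver):
     *
     *   down_write(&cfg->ioctl_rwsem);        // blocks until readers exit
     *   up_write(&cfg->ioctl_rwsem);
     */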
3588
3589/*
3590 * Character device file operations
3591 */
3592static const struct file_operations cxlflash_chr_fops = {
3593        .owner          = THIS_MODULE,
3594        .open           = cxlflash_chr_open,
3595        .unlocked_ioctl = cxlflash_chr_ioctl,
3596        .compat_ioctl   = cxlflash_chr_ioctl,
3597};
3598
3599/**
3600 * init_chrdev() - initialize the character device for the host
3601 * @cfg:        Internal structure associated with the host.
3602 *
3603 * Return: 0 on success, -errno on failure
3604 */
3605static int init_chrdev(struct cxlflash_cfg *cfg)
3606{
3607        struct device *dev = &cfg->dev->dev;
3608        struct device *char_dev;
3609        dev_t devno;
3610        int minor;
3611        int rc = 0;
3612
3613        minor = cxlflash_get_minor();
3614        if (unlikely(minor < 0)) {
3615                dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3616                rc = -ENOSPC;
3617                goto out;
3618        }
3619
3620        devno = MKDEV(cxlflash_major, minor);
3621        cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3622
3623        rc = cdev_add(&cfg->cdev, devno, 1);
3624        if (rc) {
3625                dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3626                goto err1;
3627        }
3628
3629        char_dev = device_create(cxlflash_class, NULL, devno,
3630                                 NULL, "cxlflash%d", minor);
3631        if (IS_ERR(char_dev)) {
3632                rc = PTR_ERR(char_dev);
3633                dev_err(dev, "%s: device_create failed rc=%d\n",
3634                        __func__, rc);
3635                goto err2;
3636        }
3637
3638        cfg->chardev = char_dev;
3639out:
3640        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3641        return rc;
3642err2:
3643        cdev_del(&cfg->cdev);
3644err1:
3645        cxlflash_put_minor(minor);
3646        goto out;
3647}
3648
3649/**
3650 * cxlflash_probe() - PCI entry point to add host
3651 * @pdev:       PCI device associated with the host.
3652 * @dev_id:     PCI device id associated with device.
3653 *
3654 * The device will initially start out in a 'probing' state and
3655 * transition to the 'normal' state at the end of a successful
3656 * probe. Should an EEH event occur during probe, the notification
3657 * thread (error_detected()) will wait until the probe handler
3658 * is nearly complete. At that time, the device will be moved to
3659 * a 'probed' state and the EEH thread woken up to drive the slot
3660 * reset and recovery (device moves to 'normal' state). Meanwhile,
3661 * the probe will be allowed to exit successfully.
3662 *
3663 * Return: 0 on success, -errno on failure
3664 */
3665static int cxlflash_probe(struct pci_dev *pdev,
3666                          const struct pci_device_id *dev_id)
3667{
3668        struct Scsi_Host *host;
3669        struct cxlflash_cfg *cfg = NULL;
3670        struct device *dev = &pdev->dev;
3671        struct dev_dependent_vals *ddv;
3672        int rc = 0;
3673        int k;
3674
3675        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3676                __func__, pdev->irq);
3677
3678        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3679        driver_template.max_sectors = ddv->max_sectors;
3680
3681        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3682        if (!host) {
3683                dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3684                rc = -ENOMEM;
3685                goto out;
3686        }
3687
3688        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3689        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3690        host->unique_id = host->host_no;
3691        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3692
3693        cfg = shost_priv(host);
3694        cfg->state = STATE_PROBING;
3695        cfg->host = host;
3696        rc = alloc_mem(cfg);
3697        if (rc) {
3698                dev_err(dev, "%s: alloc_mem failed\n", __func__);
3699                rc = -ENOMEM;
3700                scsi_host_put(cfg->host);
3701                goto out;
3702        }
3703
3704        cfg->init_state = INIT_STATE_NONE;
3705        cfg->dev = pdev;
3706        cfg->cxl_fops = cxlflash_cxl_fops;
3707        cfg->ops = cxlflash_assign_ops(ddv);
3708        WARN_ON_ONCE(!cfg->ops);
3709
3710        /*
3711         * Promoted LUNs move to the top of the LUN table. The rest stay on
3712         * the bottom half. The bottom half grows from the end (index = 255),
3713         * whereas the top half grows from the beginning (index = 0).
3714         *
3715         * Initialize the last LUN index for all possible ports.
3716         */
3717        cfg->promote_lun_index = 0;
3718
3719        for (k = 0; k < MAX_FC_PORTS; k++)
3720                cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3721
3722        cfg->dev_id = (struct pci_device_id *)dev_id;
3723
3724        init_waitqueue_head(&cfg->tmf_waitq);
3725        init_waitqueue_head(&cfg->reset_waitq);
3726
3727        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3728        cfg->lr_state = LINK_RESET_INVALID;
3729        cfg->lr_port = -1;
3730        spin_lock_init(&cfg->tmf_slock);
3731        mutex_init(&cfg->ctx_tbl_list_mutex);
3732        mutex_init(&cfg->ctx_recovery_mutex);
3733        init_rwsem(&cfg->ioctl_rwsem);
3734        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3735        INIT_LIST_HEAD(&cfg->lluns);
3736
3737        pci_set_drvdata(pdev, cfg);
3738
3739        rc = init_pci(cfg);
3740        if (rc) {
3741                dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3742                goto out_remove;
3743        }
3744        cfg->init_state = INIT_STATE_PCI;
3745
3746        cfg->afu_cookie = cfg->ops->create_afu(pdev);
3747        if (unlikely(!cfg->afu_cookie)) {
3748                dev_err(dev, "%s: create_afu failed\n", __func__);
                    rc = -ENOMEM;
3749                goto out_remove;
3750        }
3751
3752        rc = init_afu(cfg);
3753        if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3754                dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3755                goto out_remove;
3756        }
3757        cfg->init_state = INIT_STATE_AFU;
3758
3759        rc = init_scsi(cfg);
3760        if (rc) {
3761                dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3762                goto out_remove;
3763        }
3764        cfg->init_state = INIT_STATE_SCSI;
3765
3766        rc = init_chrdev(cfg);
3767        if (rc) {
3768                dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3769                goto out_remove;
3770        }
3771        cfg->init_state = INIT_STATE_CDEV;
3772
3773        if (wq_has_sleeper(&cfg->reset_waitq)) {
3774                cfg->state = STATE_PROBED;
3775                wake_up_all(&cfg->reset_waitq);
3776        } else
3777                cfg->state = STATE_NORMAL;
3778out:
3779        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3780        return rc;
3781
3782out_remove:
3783        cfg->state = STATE_PROBED;
3784        cxlflash_remove(pdev);
3785        goto out;
3786}
3787
3788/**
3789 * cxlflash_pci_error_detected() - called when a PCI error is detected
3790 * @pdev:       PCI device struct.
3791 * @state:      PCI channel state.
3792 *
3793 * When an EEH occurs during an active reset, wait until the reset is
3794 * complete and then take action based upon the device state.
3795 *
3796 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3797 */
3798static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3799                                                    pci_channel_state_t state)
3800{
3801        int rc = 0;
3802        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3803        struct device *dev = &cfg->dev->dev;
3804
3805        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3806
3807        switch (state) {
3808        case pci_channel_io_frozen:
3809                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3810                                             cfg->state != STATE_PROBING);
3811                if (cfg->state == STATE_FAILTERM)
3812                        return PCI_ERS_RESULT_DISCONNECT;
3813
3814                cfg->state = STATE_RESET;
3815                scsi_block_requests(cfg->host);
3816                drain_ioctls(cfg);
3817                rc = cxlflash_mark_contexts_error(cfg);
3818                if (unlikely(rc))
3819                        dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3820                                __func__, rc);
3821                term_afu(cfg);
3822                return PCI_ERS_RESULT_NEED_RESET;
3823        case pci_channel_io_perm_failure:
3824                cfg->state = STATE_FAILTERM;
3825                wake_up_all(&cfg->reset_waitq);
3826                scsi_unblock_requests(cfg->host);
3827                return PCI_ERS_RESULT_DISCONNECT;
3828        default:
3829                break;
3830        }
3831        return PCI_ERS_RESULT_NEED_RESET;
3832}
3833
3834/**
3835 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3836 * @pdev:       PCI device struct.
3837 *
3838 * This routine is called by the pci error recovery code after the PCI
3839 * slot has been reset, just before we should resume normal operations.
3840 *
3841 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3842 */
3843static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3844{
3845        int rc = 0;
3846        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3847        struct device *dev = &cfg->dev->dev;
3848
3849        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3850
3851        rc = init_afu(cfg);
3852        if (unlikely(rc)) {
3853                dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3854                return PCI_ERS_RESULT_DISCONNECT;
3855        }
3856
3857        return PCI_ERS_RESULT_RECOVERED;
3858}
3859
3860/**
3861 * cxlflash_pci_resume() - called when normal operation can resume
3862 * @pdev:       PCI device struct
3863 */
3864static void cxlflash_pci_resume(struct pci_dev *pdev)
3865{
3866        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3867        struct device *dev = &cfg->dev->dev;
3868
3869        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3870
3871        cfg->state = STATE_NORMAL;
3872        wake_up_all(&cfg->reset_waitq);
3873        scsi_unblock_requests(cfg->host);
3874}
3875
3876/**
3877 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3878 * @dev:        Character device.
3879 * @mode:       Mode that can be used to verify access.
3880 *
3881 * Return: Allocated string describing the devtmpfs structure.
3882 */
3883static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3884{
3885        return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3886}
3887
3888/**
3889 * cxlflash_class_init() - create character device class
3890 *
3891 * Return: 0 on success, -errno on failure
3892 */
3893static int cxlflash_class_init(void)
3894{
3895        dev_t devno;
3896        int rc = 0;
3897
3898        rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3899        if (unlikely(rc)) {
3900                pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3901                goto out;
3902        }
3903
3904        cxlflash_major = MAJOR(devno);
3905
3906        cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3907        if (IS_ERR(cxlflash_class)) {
3908                rc = PTR_ERR(cxlflash_class);
3909                pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3910                goto err;
3911        }
3912
3913        cxlflash_class->devnode = cxlflash_devnode;
3914out:
3915        pr_debug("%s: returning rc=%d\n", __func__, rc);
3916        return rc;
3917err:
3918        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3919        goto out;
3920}
3921
3922/**
3923 * cxlflash_class_exit() - destroy character device class
3924 */
3925static void cxlflash_class_exit(void)
3926{
3927        dev_t devno = MKDEV(cxlflash_major, 0);
3928
3929        class_destroy(cxlflash_class);
3930        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3931}
3932
3933static const struct pci_error_handlers cxlflash_err_handler = {
3934        .error_detected = cxlflash_pci_error_detected,
3935        .slot_reset = cxlflash_pci_slot_reset,
3936        .resume = cxlflash_pci_resume,
3937};
3938
3939/*
3940 * PCI device structure
3941 */
3942static struct pci_driver cxlflash_driver = {
3943        .name = CXLFLASH_NAME,
3944        .id_table = cxlflash_pci_table,
3945        .probe = cxlflash_probe,
3946        .remove = cxlflash_remove,
3947        .shutdown = cxlflash_remove,
3948        .err_handler = &cxlflash_err_handler,
3949};
3950
3951/**
3952 * init_cxlflash() - module entry point
3953 *
3954 * Return: 0 on success, -errno on failure
3955 */
3956static int __init init_cxlflash(void)
3957{
3958        int rc;
3959
3960        check_sizes();
3961        cxlflash_list_init();
3962        rc = cxlflash_class_init();
3963        if (unlikely(rc))
3964                goto out;
3965
3966        rc = pci_register_driver(&cxlflash_driver);
3967        if (unlikely(rc))
3968                goto err;
3969out:
3970        pr_debug("%s: returning rc=%d\n", __func__, rc);
3971        return rc;
3972err:
3973        cxlflash_class_exit();
3974        goto out;
3975}
3976
3977/**
3978 * exit_cxlflash() - module exit point
3979 */
3980static void __exit exit_cxlflash(void)
3981{
3982        cxlflash_term_global_luns();
3983        cxlflash_free_errpage();
3984
3985        pci_unregister_driver(&cxlflash_driver);
3986        cxlflash_class_exit();
3987}
3988
3989module_init(init_cxlflash);
3990module_exit(exit_cxlflash);
3991