linux/drivers/scsi/cxlflash/main.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:        AFU command that experienced the error.
 * @scp:        SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct afu *afu;
        struct cxlflash_cfg *cfg;
        struct device *dev;
        struct sisl_ioasa *ioasa;
        u32 resid;

        /* Validate the command before dereferencing it */
        if (unlikely(!cmd))
                return;

        afu = cmd->parent;
        cfg = afu->parent;
        dev = &cfg->dev->dev;
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
                        __func__, cmd, scp, resid);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
                        __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
                        memcpy(scp->sense_buffer, ioasa->sense_data,
                               SISL_SENSE_DATA_LEN);
                        scp->result = ioasa->rc.scsi_rc;
                } else
                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
        }

        /*
         * We encountered an error. Set scp->result based on nature
         * of error.
         */
        if (ioasa->rc.fc_rc) {
                /* We have an FC status */
                switch (ioasa->rc.fc_rc) {
                case SISL_FC_RC_LINKDOWN:
                        scp->result = (DID_REQUEUE << 16);
                        break;
                case SISL_FC_RC_RESID:
                        /* This indicates an FCP resid underrun */
                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
                                /* If the SISL_RC_FLAGS_OVERRUN flag was set,
                                 * then this error is handled elsewhere.
                                 * If not, it must be handled here.
                                 * This is probably an AFU bug.
                                 */
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_FC_RC_RESIDERR:
                        /* Resid mismatch between adapter and device */
                case SISL_FC_RC_TGTABORT:
                case SISL_FC_RC_ABORTOK:
                case SISL_FC_RC_ABORTFAIL:
                case SISL_FC_RC_NOLOGI:
                case SISL_FC_RC_ABORTPEND:
                case SISL_FC_RC_WRABORTPEND:
                case SISL_FC_RC_NOEXP:
                case SISL_FC_RC_INUSE:
                        scp->result = (DID_ERROR << 16);
                        break;
                }
        }

        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_NO_CONNECT << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd:        AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        list_del(&cmd->list);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        if (cmd->scp) {
                scp = cmd->scp;
                if (unlikely(cmd->sa.ioasc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
                                    __func__, scp, scp->result, cmd->sa.ioasc);
                scp->scsi_done(scp);
        } else if (cmd->cmd_tmf) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                wake_up_all_locked(&cfg->tmf_waitq);
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
        } else
                complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:        Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct afu_cmd *cmd, *tmp;
        struct scsi_cmnd *scp;
        ulong lock_flags;

        list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
                /* Bypass command when on a doneq, cmd_complete() will handle */
                if (!list_empty(&cmd->queue))
                        continue;

                list_del(&cmd->list);

                if (cmd->scp) {
                        scp = cmd->scp;
                        scp->result = (DID_IMM_RETRY << 16);
                        scp->scsi_done(scp);
                } else {
                        cmd->cmd_aborted = true;

                        if (cmd->cmd_tmf) {
                                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                                cfg->tmf_active = false;
                                wake_up_all_locked(&cfg->tmf_waitq);
                                spin_unlock_irqrestore(&cfg->tmf_slock,
                                                       lock_flags);
                        } else
                                complete(&cmd->cevent);
                }
        }
}

/**
 * context_reset() - reset context via specified register
 * @hwq:        Hardware queue owning the context to be reset.
 * @reset_reg:  MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = -ETIMEDOUT;
        int nretry = 0;
        u64 val = 0x1;
        ulong lock_flags;

        dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        writeq_be(val, reset_reg);
        do {
                val = readq_be(reset_reg);
                if ((val & 0x1) == 0x0) {
                        rc = 0;
                        break;
                }

                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);

        if (!rc)
                flush_pending_cmds(hwq);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
                __func__, rc, val, nretry);
        return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        s64 room;
        ulong lock_flags;

        /*
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
                                            "0x%02X, room=0x%016llX\n",
                                            __func__, cmd->rcb.cdb[0], room);
                        hwq->room = 0;
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
                hwq->room = room - 1;
        }

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
                __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        int newval;
        ulong lock_flags;

        newval = atomic_dec_if_positive(&hwq->hsq_credits);
        if (newval <= 0) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        cmd->rcb.ioasa = &cmd->sa;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

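        /* Copy the IOARCB into the current SQ slot, then advance the
         * producer pointer, wrapping back to the start of the ring when
         * the end is reached.
         */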
        *hwq->hsq_curr = cmd->rcb;
        if (hwq->hsq_curr < hwq->hsq_end)
                hwq->hsq_curr++;
        else
                hwq->hsq_curr = hwq->hsq_start;

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
               "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
               cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
               readq_be(&hwq->host_map->sq_head),
               readq_be(&hwq->host_map->sq_tail));
        return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
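        /* rcb.timeout is expressed in seconds; allow the command twice
         * that long before declaring a timeout.
         */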
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
        if (!timeout)
                rc = -ETIMEDOUT;

        if (cmd->cmd_aborted)
                rc = -EAGAIN;

        if (unlikely(cmd->sa.ioasc != 0)) {
                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -EIO;
        }

        return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 * @afu:        AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
                             struct afu *afu)
{
        u32 tag;
        u32 hwq = 0;

        if (afu->num_hwqs == 1)
                return 0;

        switch (afu->hwq_mode) {
        case HWQ_MODE_RR:
                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
                break;
        case HWQ_MODE_TAG:
                tag = blk_mq_unique_tag(scp->request);
                hwq = blk_mq_unique_tag_to_hwq(tag);
                break;
        case HWQ_MODE_CPU:
                hwq = smp_processor_id() % afu->num_hwqs;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:        Internal structure associated with the host.
 * @sdev:       SCSI device destined for TMF.
 * @tmfcmd:     TMF command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
                    u64 tmfcmd)
{
        struct afu *afu = cfg->afu;
        struct afu_cmd *cmd = NULL;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        bool needs_deletion = false;
        char *buf = NULL;
        ulong lock_flags;
        int rc = 0;
        ulong to;

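        /* Over-allocate by the alignment requirement so that the command
         * can be aligned via PTR_ALIGN() below.
         */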
        buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
        if (unlikely(!buf)) {
                dev_err(dev, "%s: no memory for command\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
        INIT_LIST_HEAD(&cmd->queue);

        /* When a Task Management Function is active, do not send another */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        cfg->tmf_active = true;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        cmd->parent = afu;
        cmd->cmd_tmf = true;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
        cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN |
                              SISL_REQ_FLAGS_TMF_CMD);
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        rc = afu->send_cmd(afu, cmd);
        if (unlikely(rc)) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        to = msecs_to_jiffies(5000);
        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
                                                       !cfg->tmf_active,
                                                       cfg->tmf_slock,
                                                       to);
        if (!to) {
                dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -ETIMEDOUT;
                needs_deletion = true;
        } else if (cmd->cmd_aborted) {
                dev_err(dev, "%s: TMF aborted\n", __func__);
                rc = -EAGAIN;
        } else if (cmd->sa.ioasc) {
                dev_err(dev, "%s: TMF failed ioasc=%08x\n",
                        __func__, cmd->sa.ioasc);
                rc = -EIO;
        }
        cfg->tmf_active = false;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        if (needs_deletion) {
                spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
                list_del(&cmd->list);
                spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        }
out:
        kfree(buf);
        return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:       SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afuci(scp);
        struct scatterlist *sg = scsi_sglist(scp);
        int hwq_index = cmd_to_target_hwq(host, scp, afu);
        struct hwq *hwq = get_hwq(afu, hwq_index);
        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
        ulong lock_flags;
        int rc = 0;

        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
                            "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /*
         * If a Task Management Function is active, wait for it to complete
         * before continuing with regular commands.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        switch (cfg->state) {
        case STATE_PROBING:
        case STATE_PROBED:
        case STATE_RESET:
                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
                goto out;
        default:
                atomic_inc(&afu->cmds_active);
                break;
        }

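        /* Only the first scatter-gather element is used; this assumes the
         * host template advertises a single SG element per command.
         */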
        if (likely(sg)) {
                cmd->rcb.data_len = sg->length;
                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
        }

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->hwq_index = hwq_index;

        cmd->sa.ioasc = 0;
        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

        cmd->rcb.req_flags = req_flags;
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        rc = afu->send_cmd(afu, cmd);
        atomic_dec(&afu->cmds_active);
out:
        return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->reset_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:        Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
        if (cfg->async_reset_cookie == 0)
                return;

        /* Wait until all async calls prior to this cookie have completed */
        async_synchronize_cookie(cfg->async_reset_cookie + 1);
        cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;
        struct hwq *hwq;
        int i;

        cancel_work_sync(&cfg->work_q);
        if (!current_is_async())
                cxlflash_reset_sync(cfg);

        if (likely(afu)) {
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);

                if (afu_is_irqpoll_enabled(afu)) {
                        for (i = 0; i < afu->num_hwqs; i++) {
                                hwq = get_hwq(afu, i);

                                irq_poll_disable(&hwq->irqpoll);
                        }
                }

                if (likely(afu->afu_map)) {
                        cfg->ops->psa_unmap(afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:        Internal structure associated with the host.
 * @level:      Depth of allocation, where to begin waterfall tear down.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
                      u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        switch (level) {
        case UNMAP_THREE:
                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
                if (index == PRIMARY_HWQ)
                        cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
                /* fall through */
        case UNMAP_TWO:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
                /* fall through */
        case UNMAP_ONE:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
                /* fall through */
        case FREE_IRQ:
                cfg->ops->free_afu_irqs(hwq->ctx_cookie);
                /* fall through */
        case UNDO_NOOP:
                /* No action required */
                break;
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg:        Internal structure associated with the host.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;
        ulong lock_flags;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
        if (index != PRIMARY_HWQ)
                WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
        hwq->ctx_cookie = NULL;

        spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
        hwq->hrrq_online = false;
        spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        flush_pending_cmds(hwq);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int k;

        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
         * area is unmapped.
         *
         * 1) Disable all AFU interrupts for each master
         * 2) Unmap the problem state area
         * 3) Stop each master context
         */
        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_intr(cfg, UNMAP_THREE, k);

        stop_afu(cfg);

        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_mc(cfg, k);

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:        Internal structure associated with the host.
 * @wait:       Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct dev_dependent_vals *ddv;
        __be64 __iomem *fc_port_regs;
        u64 reg, status;
        int i, retry_cnt = 0;

        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
                return;

        if (!afu || !afu->afu_map) {
                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }

        /* Notify AFU */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
                reg |= SISL_FC_SHUTDOWN_NORMAL;
                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
        }

        if (!wait)
                return;

        /* Wait up to 1.5 seconds for shutdown processing to complete */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);
                retry_cnt = 0;

                while (true) {
                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
                                break;
                        if (++retry_cnt >= MC_RETRY_CNT) {
                                dev_dbg(dev, "%s: port %d shutdown processing "
                                        "not yet completed\n", __func__, i);
                                break;
                        }
                        msleep(100 * retry_cnt);
                }
        }
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
        int minor;
        long bit;

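        /* Minor numbers are tracked in the module-wide cxlflash_minor
         * bitmap, one bit per adapter.
         */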
        bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
        if (bit >= CXLFLASH_MAX_ADAPTERS)
                return -1;

        minor = bit & MINORMASK;
        set_bit(minor, cxlflash_minor);
        return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:      Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
        clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
        device_unregister(cfg->chardev);
        cfg->chardev = NULL;
        cdev_del(&cfg->cdev);
        cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:       PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        ulong lock_flags;

        if (!pci_is_enabled(pdev)) {
                dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }

        /* Yield to running recovery threads before continuing with remove */
        wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
                                     cfg->state != STATE_PROBING);
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        /* Notify AFU and wait for shutdown processing to complete */
        notify_shutdown(cfg, true);

        cfg->state = STATE_FAILTERM;
        cxlflash_stop_term_user_contexts(cfg);

        switch (cfg->init_state) {
        case INIT_STATE_CDEV:
                cxlflash_release_chrdev(cfg);
                /* fall through */
        case INIT_STATE_SCSI:
                cxlflash_term_local_luns(cfg);
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
                /* fall through */
        case INIT_STATE_PCI:
                cfg->ops->destroy_afu(cfg->afu_cookie);
                pci_disable_device(pdev);
                /* fall through */
        case INIT_STATE_NONE:
                free_mem(cfg);
                scsi_host_put(cfg->host);
                break;
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:        Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *      0 on success
 *      -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        struct device *dev = &cfg->dev->dev;

        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                dev_err(dev, "%s: cannot get %d free pages\n",
                        __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
        cfg->afu->afu_map = NULL;
out:
        return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *      TRUE (1) when the specified port is online
 *      FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
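                /* An all-ones value suggests the MMIO read itself failed;
                 * cut the remaining retries short.
                 */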
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *      TRUE (1) when the specified port is offline
 *      FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @wwpn:       The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
                         u64 wwpn)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);
        }

        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);
        }
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 port_sel;

        /* first switch the AFU to the other links, if any */
        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
        port_sel &= ~(1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);

        /* switch back to include this port */
        port_sel |= (1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:        AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
        struct cxlflash_cfg *cfg = afu->parent;
        __be64 __iomem *fc_port_regs;
        int i;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 reg;

        /*
         * Global async interrupts: the AFU clears afu_ctrl on context exit
         * if async interrupts were sent to that context. This prevents the
         * AFU from sending further async interrupts when there is nobody
         * to receive them.
         */

        /* mask all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
        /* set LISN# to send and point to primary master context */
        reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

        if (afu->internal_lun)
                reg |= 1;       /* Bit 63 indicates local lun */
        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
        /* clear all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
        /* unmask bits that are of interest */
        /* note: afu can send an interrupt after this step */
        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
        /* clear again in case a bit came on after previous clear but before */
        /* unmask */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

        /* Clear/Set internal lun bits */
        fc_port_regs = get_fc_port_regs(cfg, 0);
        reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
        reg &= SISL_FC_INTERNAL_MASK;
        if (afu->internal_lun)
                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
        writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

        /* now clear FC errors */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
                writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
        }

        /* sync interrupts for master's IOARRIN write */
        /* note that unlike asyncs, there can be no pending sync interrupts */
        /* at this time (this is a fresh context and master has not written */
        /* IOARRIN yet), so there is nothing to clear. */

        /* set LISN#, it is always sent to the context that wrote IOARRIN */
        for (i = 0; i < afu->num_hwqs; i++) {
                hwq = get_hwq(afu, i);

                reg = readq_be(&hwq->host_map->ctx_ctrl);
                WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
                reg |= SISL_MSI_SYNC_ERROR;
                writeq_be(reg, &hwq->host_map->ctx_ctrl);
                writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
        }
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 reg;
        u64 reg_unmasked;

        reg = readq_be(&hwq->host_map->intr_status);
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

        if (reg_unmasked == 0UL) {
                dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
                        __func__, reg);
                goto cxlflash_sync_err_irq_exit;
        }

        dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
                __func__, reg);

        writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
        return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:        Hardware queue to process.
 * @doneq:      Queue of commands harvested from the RRQ.
 * @budget:     Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
        struct afu *afu = hwq->afu;
        struct afu_cmd *cmd;
        struct sisl_ioasa *ioasa;
        struct sisl_ioarcb *ioarcb;
        bool toggle = hwq->toggle;
        int num_hrrq = 0;
        u64 entry,
            *hrrq_start = hwq->hrrq_start,
            *hrrq_end = hwq->hrrq_end,
            *hrrq_curr = hwq->hrrq_curr;

        /* Process ready RRQ entries up to the specified budget (if any) */
        while (true) {
                entry = *hrrq_curr;

                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;

                entry &= ~SISL_RESP_HANDLE_T_BIT;

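                /* An RRQ entry carries the address of the IOASA (SQ command
                 * mode) or of the IOARCB; either way, the owning command is
                 * recovered via container_of().
                 */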
                if (afu_is_sq_cmd_mode(afu)) {
                        ioasa = (struct sisl_ioasa *)entry;
                        cmd = container_of(ioasa, struct afu_cmd, sa);
                } else {
                        ioarcb = (struct sisl_ioarcb *)entry;
                        cmd = container_of(ioarcb, struct afu_cmd, rcb);
                }

                list_add_tail(&cmd->queue, doneq);

                /* Advance to next entry or wrap and flip the toggle bit */
                if (hrrq_curr < hrrq_end)
                        hrrq_curr++;
                else {
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }

                atomic_inc(&hwq->hsq_credits);
                num_hrrq++;

                if (budget > 0 && num_hrrq >= budget)
                        break;
        }

        hwq->hrrq_curr = hrrq_curr;
        hwq->toggle = toggle;

        return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:      Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
        struct afu_cmd *cmd, *tmp;

        WARN_ON(list_empty(doneq));

        list_for_each_entry_safe(cmd, tmp, doneq, queue)
                cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:    IRQ poll structure associated with queue to poll.
 * @budget:     Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
        struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        num_entries = process_hrrq(hwq, &doneq, budget);
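        /* Harvesting fewer entries than budgeted means the queue drained;
         * signal irq_poll that this polling cycle is complete.
         */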
        if (num_entries < budget)
                irq_poll_complete(irqpoll);

        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        process_cmd_doneq(&doneq);
        return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware queue.
1465 *
1466 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
1467 */
1468static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1469{
1470        struct hwq *hwq = (struct hwq *)data;
1471        struct afu *afu = hwq->afu;
1472        unsigned long hrrq_flags;
1473        LIST_HEAD(doneq);
1474        int num_entries = 0;
1475
1476        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1477
1478        /* Silently drop spurious interrupts when queue is not online */
1479        if (!hwq->hrrq_online) {
1480                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1481                return IRQ_HANDLED;
1482        }
1483
1484        if (afu_is_irqpoll_enabled(afu)) {
1485                irq_poll_sched(&hwq->irqpoll);
1486                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1487                return IRQ_HANDLED;
1488        }
1489
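            /* A budget of -1 processes entries until the queue drains */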
1490        num_entries = process_hrrq(hwq, &doneq, -1);
1491        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1492
1493        if (num_entries == 0)
1494                return IRQ_NONE;
1495
1496        process_cmd_doneq(&doneq);
1497        return IRQ_HANDLED;
1498}
1499
1500/*
1501 * Asynchronous interrupt information table
1502 *
1503 * NOTE:
1504 *      - Order matters here as this array is indexed by bit position.
1505 *
1506 *      - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1507 *        as complex and complains due to a lack of parentheses/braces.
1508 */
1509#define ASTATUS_FC(_a, _b, _c, _d)                                       \
1510        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
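    /*
     * For example, ASTATUS_FC(0, LINK_UP, "link up", 0) expands to
     * { SISL_ASTATUS_FC0_LINK_UP, "link up", 0, (0) }: the status bit,
     * description, port number and action mask for a port 0 link up.
     */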
1511
1512#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
1513        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
1514        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
1515        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
1516        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
1517        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1518        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
1519        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
1520        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1521
1522static const struct asyc_intr_info ainfo[] = {
1523        BUILD_SISL_ASTATUS_FC_PORT(1),
1524        BUILD_SISL_ASTATUS_FC_PORT(0),
1525        BUILD_SISL_ASTATUS_FC_PORT(3),
1526        BUILD_SISL_ASTATUS_FC_PORT(2)
1527};
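    /*
     * The 1, 0, 3, 2 ordering above mirrors the bit layout of the
     * asynchronous interrupt status register, which the handler below
     * walks by bit position.
     */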
1528
1529/**
1530 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1531 * @irq:        Interrupt number.
1532 * @data:       Private data provided at interrupt registration, the hardware queue.
1533 *
1534 * Return: Always return IRQ_HANDLED.
1535 */
1536static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1537{
1538        struct hwq *hwq = (struct hwq *)data;
1539        struct afu *afu = hwq->afu;
1540        struct cxlflash_cfg *cfg = afu->parent;
1541        struct device *dev = &cfg->dev->dev;
1542        const struct asyc_intr_info *info;
1543        struct sisl_global_map __iomem *global = &afu->afu_map->global;
1544        __be64 __iomem *fc_port_regs;
1545        u64 reg_unmasked;
1546        u64 reg;
1547        u64 bit;
1548        u8 port;
1549
1550        reg = readq_be(&global->regs.aintr_status);
1551        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1552
1553        if (unlikely(reg_unmasked == 0)) {
1554                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1555                        __func__, reg);
1556                goto out;
1557        }
1558
1559        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1560        writeq_be(reg_unmasked, &global->regs.aintr_clear);
1561
1562        /* Check each bit that is on */
1563        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1564                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1565                        WARN_ON_ONCE(1);
1566                        continue;
1567                }
1568
1569                info = &ainfo[bit];
1570                if (unlikely(info->status != 1ULL << bit)) {
1571                        WARN_ON_ONCE(1);
1572                        continue;
1573                }
1574
1575                port = info->port;
1576                fc_port_regs = get_fc_port_regs(cfg, port);
1577
1578                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1579                        __func__, port, info->desc,
1580                        readq_be(&fc_port_regs[FC_STATUS / 8]));
1581
1582                /*
1583                 * Do link reset first, some OTHER errors will set FC_ERROR
1584                 * again if cleared before or w/o a reset
1585                 */
1586                if (info->action & LINK_RESET) {
1587                        dev_err(dev, "%s: FC Port %d: resetting link\n",
1588                                __func__, port);
1589                        cfg->lr_state = LINK_RESET_REQUIRED;
1590                        cfg->lr_port = port;
1591                        schedule_work(&cfg->work_q);
1592                }
1593
1594                if (info->action & CLR_FC_ERROR) {
1595                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1596
1597                        /*
1598                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1599                         * should be the same and tracing one is sufficient.
1600                         */
1601
1602                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1603                                __func__, port, reg);
1604
1605                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1606                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1607                }
1608
1609                if (info->action & SCAN_HOST) {
1610                        atomic_inc(&cfg->scan_host_needed);
1611                        schedule_work(&cfg->work_q);
1612                }
1613        }
1614
1615out:
1616        return IRQ_HANDLED;
1617}
1618
1619/**
1620 * read_vpd() - obtains the WWPNs from VPD
1621 * @cfg:        Internal structure associated with the host.
1622 * @wwpn:       Array of size MAX_FC_PORTS to pass back WWPNs
1623 *
1624 * Return: 0 on success, -errno on failure
1625 */
1626static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1627{
1628        struct device *dev = &cfg->dev->dev;
1629        struct pci_dev *pdev = cfg->dev;
1630        int rc = 0;
1631        int ro_start, ro_size, i, j, k;
1632        ssize_t vpd_size;
1633        char vpd_data[CXLFLASH_VPD_LEN];
1634        char tmp_buf[WWPN_BUF_LEN] = { 0 };
1635        const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1636                                                cfg->dev_id->driver_data;
1637        const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1638        const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1639
1640        /* Get the VPD data from the device */
1641        vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1642        if (unlikely(vpd_size <= 0)) {
1643                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1644                        __func__, vpd_size);
1645                rc = -ENODEV;
1646                goto out;
1647        }
1648
1649        /* Get the read only section offset */
1650        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1651                                    PCI_VPD_LRDT_RO_DATA);
1652        if (unlikely(ro_start < 0)) {
1653                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1654                rc = -ENODEV;
1655                goto out;
1656        }
1657
1658        /* Get the read only section size, cap when extends beyond read VPD */
1659        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1660        j = ro_size;
1661        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1662        if (unlikely((i + j) > vpd_size)) {
1663                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1664                        __func__, (i + j), vpd_size);
1665                ro_size = vpd_size - i;
1666        }
1667
1668        /*
1669         * Find the offset of the WWPN tag within the read only
1670         * VPD data and validate the found field (partials are
1671         * no good to us). Convert the ASCII data to an integer
1672         * value. Note that we must copy to a temporary buffer
1673         * because the conversion service requires that the ASCII
1674         * string be terminated.
1675         *
1676         * Allow for WWPN not being found for all devices, setting
1677         * the returned WWPN to zero when not found. Notify with a
1678         * log error for cards that should have had WWPN keywords
1679         * in the VPD - cards requiring WWPN will not have their
1680         * ports programmed and operate in an undefined state.
1681         */
1682        for (k = 0; k < cfg->num_fc_ports; k++) {
1683                j = ro_size;
1684                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1685
1686                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1687                if (i < 0) {
1688                        if (wwpn_vpd_required)
1689                                dev_err(dev, "%s: Port %d WWPN not found\n",
1690                                        __func__, k);
1691                        wwpn[k] = 0ULL;
1692                        continue;
1693                }
1694
1695                j = pci_vpd_info_field_size(&vpd_data[i]);
1696                i += PCI_VPD_INFO_FLD_HDR_SIZE;
1697                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1698                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1699                                __func__, k);
1700                        rc = -ENODEV;
1701                        goto out;
1702                }
1703
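                    /*
                     * WWPN_LEN (16) doubles as the kstrtoul() base below:
                     * a WWPN is 16 hexadecimal characters, so the value is
                     * parsed as base 16.
                     */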
1704                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1705                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1706                if (unlikely(rc)) {
1707                        dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1708                                __func__, k);
1709                        rc = -ENODEV;
1710                        goto out;
1711                }
1712
1713                dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1714        }
1715
1716out:
1717        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1718        return rc;
1719}
1720
1721/**
1722 * init_pcr() - initialize the provisioning and control registers
1723 * @cfg:        Internal structure associated with the host.
1724 *
1725 * Also sets up fast access to the mapped registers and initializes AFU
1726 * command fields that never change.
1727 */
1728static void init_pcr(struct cxlflash_cfg *cfg)
1729{
1730        struct afu *afu = cfg->afu;
1731        struct sisl_ctrl_map __iomem *ctrl_map;
1732        struct hwq *hwq;
1733        void *cookie;
1734        int i;
1735
1736        for (i = 0; i < MAX_CONTEXT; i++) {
1737                ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1738                /* Disrupt any clients that could be running */
1739                /* e.g. clients that survived a master restart */
1740                writeq_be(0, &ctrl_map->rht_start);
1741                writeq_be(0, &ctrl_map->rht_cnt_id);
1742                writeq_be(0, &ctrl_map->ctx_cap);
1743        }
1744
1745        /* Copy frequently used fields into hwq */
1746        for (i = 0; i < afu->num_hwqs; i++) {
1747                hwq = get_hwq(afu, i);
1748                cookie = hwq->ctx_cookie;
1749
1750                hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1751                hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1752                hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1753
1754                /* Program the Endian Control for the master context */
1755                writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1756        }
1757}
1758
1759/**
1760 * init_global() - initialize AFU global registers
1761 * @cfg:        Internal structure associated with the host.
     *
     * Return: 0 on success, -errno on failure
1762 */
1763static int init_global(struct cxlflash_cfg *cfg)
1764{
1765        struct afu *afu = cfg->afu;
1766        struct device *dev = &cfg->dev->dev;
1767        struct hwq *hwq;
1768        struct sisl_host_map __iomem *hmap;
1769        __be64 __iomem *fc_port_regs;
1770        u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1771        int i = 0, num_ports = 0;
1772        int rc = 0;
1773        int j;
1774        void *ctx;
1775        u64 reg;
1776
1777        rc = read_vpd(cfg, &wwpn[0]);
1778        if (rc) {
1779                dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1780                goto out;
1781        }
1782
1783        /* Set up RRQ and SQ in HWQ for master issued cmds */
1784        for (i = 0; i < afu->num_hwqs; i++) {
1785                hwq = get_hwq(afu, i);
1786                hmap = hwq->host_map;
1787
1788                writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1789                writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1790                hwq->hrrq_online = true;
1791
1792                if (afu_is_sq_cmd_mode(afu)) {
1793                        writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1794                        writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1795                }
1796        }
1797
1798        /* AFU configuration */
1799        reg = readq_be(&afu->afu_map->global.regs.afu_config);
1800        reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
1801        /*
1802         * Enable all auto retry options and control endianness. Leave
1803         * others at default: CTX_CAP write protected, mbox_r does not
1804         * clear on read, and checker on if dual AFU.
             */
1805        writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1806
1807        /* Global port select: select either port */
1808        if (afu->internal_lun) {
1809                /* Only use port 0 */
1810                writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1811                num_ports = 0;
1812        } else {
1813                writeq_be(PORT_MASK(cfg->num_fc_ports),
1814                          &afu->afu_map->global.regs.afu_port_sel);
1815                num_ports = cfg->num_fc_ports;
1816        }
1817
1818        for (i = 0; i < num_ports; i++) {
1819                fc_port_regs = get_fc_port_regs(cfg, i);
1820
1821                /* Unmask all errors (but they are still masked at AFU) */
1822                writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1823                /* Clear CRC error cnt & set a threshold */
1824                (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1825                writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1826
1827                /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1828                if (wwpn[i] != 0)
1829                        afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1830        /*
1831         * Programming WWPN back to back causes additional
1832         * offline/online transitions and a PLOGI.
             */
1833                msleep(100);
1834        }
1835
1836        if (afu_is_ocxl_lisn(afu)) {
1837                /* Set up the LISN effective address for each master */
1838                for (i = 0; i < afu->num_hwqs; i++) {
1839                        hwq = get_hwq(afu, i);
1840                        ctx = hwq->ctx_cookie;
1841
1842                        for (j = 0; j < hwq->num_irqs; j++) {
1843                                reg = cfg->ops->get_irq_objhndl(ctx, j);
1844                                writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1845                        }
1846
1847                        reg = hwq->ctx_hndl;
1848                        writeq_be(SISL_LISN_PASID(reg, reg),
1849                                  &hwq->ctrl_map->lisn_pasid[0]);
1850                        writeq_be(SISL_LISN_PASID(0UL, reg),
1851                                  &hwq->ctrl_map->lisn_pasid[1]);
1852                }
1853        }
1854
1855        /*
1856         * Set up master's own CTX_CAP to allow real mode, host translation
1857         * tables, AFU cmds and read/write GSCSI cmds. First, unlock the
             * ctx_cap write by reading the mbox.
             */
1858        for (i = 0; i < afu->num_hwqs; i++) {
1859                hwq = get_hwq(afu, i);
1860
1861                (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1862                writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1863                        SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1864                        SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1865                        &hwq->ctrl_map->ctx_cap);
1866        }
1867
1868        /*
1869         * Determine write-same unmap support for host by evaluating the unmap
1870         * sector support bit of the context control register associated with
1871         * the primary hardware queue. Note that while this status is reflected
1872         * in a context register, the outcome can be assumed to be host-wide.
1873         */
1874        hwq = get_hwq(afu, PRIMARY_HWQ);
1875        reg = readq_be(&hwq->host_map->ctx_ctrl);
1876        if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1877                cfg->ws_unmap = true;
1878
1879        /* Initialize heartbeat */
1880        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1881out:
1882        return rc;
1883}
1884
1885/**
1886 * start_afu() - initializes and starts the AFU
1887 * @cfg:        Internal structure associated with the host.
1888 */
1889static int start_afu(struct cxlflash_cfg *cfg)
1890{
1891        struct afu *afu = cfg->afu;
1892        struct device *dev = &cfg->dev->dev;
1893        struct hwq *hwq;
1894        int rc = 0;
1895        int i;
1896
1897        init_pcr(cfg);
1898
1899        /* Initialize each HWQ */
1900        for (i = 0; i < afu->num_hwqs; i++) {
1901                hwq = get_hwq(afu, i);
1902
1903                /* After an AFU reset, RRQ entries are stale, clear them */
1904                memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1905
1906                /* Initialize RRQ pointers */
1907                hwq->hrrq_start = &hwq->rrq_entry[0];
1908                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1909                hwq->hrrq_curr = hwq->hrrq_start;
1910                hwq->toggle = 1;
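                    /*
                     * The AFU stamps each RRQ entry with a toggle bit that
                     * flips on every pass through the ring; an entry is
                     * ready when its toggle bit matches hwq->toggle, which
                     * starts at 1 to match the first pass over the zeroed
                     * queue.
                     */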
1911
1912                /* Initialize spin locks */
1913                spin_lock_init(&hwq->hrrq_slock);
1914                spin_lock_init(&hwq->hsq_slock);
1915
1916                /* Initialize SQ */
1917                if (afu_is_sq_cmd_mode(afu)) {
1918                        memset(&hwq->sq, 0, sizeof(hwq->sq));
1919                        hwq->hsq_start = &hwq->sq[0];
1920                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1921                        hwq->hsq_curr = hwq->hsq_start;
1922
1923                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1924                }
1925
1926                /* Initialize IRQ poll */
1927                if (afu_is_irqpoll_enabled(afu))
1928                        irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1929                                      cxlflash_irqpoll);
1930
1931        }
1932
1933        rc = init_global(cfg);
1934
1935        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1936        return rc;
1937}
1938
1939/**
1940 * init_intr() - setup interrupt handlers for the master context
1941 * @cfg:        Internal structure associated with the host.
1942 * @hwq:        Hardware queue to initialize.
1943 *
1944 * Return: UNDO_NOOP on success, otherwise the undo level required to
     *         unwind the partial interrupt setup
1945 */
1946static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1947                                 struct hwq *hwq)
1948{
1949        struct device *dev = &cfg->dev->dev;
1950        void *ctx = hwq->ctx_cookie;
1951        int rc = 0;
1952        enum undo_level level = UNDO_NOOP;
1953        bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1954        int num_irqs = hwq->num_irqs;
1955
1956        rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1957        if (unlikely(rc)) {
1958                dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1959                        __func__, rc);
1960                level = UNDO_NOOP;
1961                goto out;
1962        }
1963
1964        rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1965                                   "SISL_MSI_SYNC_ERROR");
1966        if (unlikely(rc <= 0)) {
1967                dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1968                level = FREE_IRQ;
1969                goto out;
1970        }
1971
1972        rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1973                                   "SISL_MSI_RRQ_UPDATED");
1974        if (unlikely(rc <= 0)) {
1975                dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1976                level = UNMAP_ONE;
1977                goto out;
1978        }
1979
1980        /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1981        if (!is_primary_hwq)
1982                goto out;
1983
1984        rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1985                                   "SISL_MSI_ASYNC_ERROR");
1986        if (unlikely(rc <= 0)) {
1987                dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1988                level = UNMAP_TWO;
1989                goto out;
1990        }
1991out:
1992        return level;
1993}
1994
1995/**
1996 * init_mc() - create and register as the master context
1997 * @cfg:        Internal structure associated with the host.
1998 * @index:      HWQ index of the master context.
1999 *
2000 * Return: 0 on success, -errno on failure
2001 */
2002static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2003{
2004        void *ctx;
2005        struct device *dev = &cfg->dev->dev;
2006        struct hwq *hwq = get_hwq(cfg->afu, index);
2007        int rc = 0;
2008        int num_irqs;
2009        enum undo_level level;
2010
2011        hwq->afu = cfg->afu;
2012        hwq->index = index;
2013        INIT_LIST_HEAD(&hwq->pending_cmds);
2014
2015        if (index == PRIMARY_HWQ) {
2016                ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2017                num_irqs = 3;
2018        } else {
2019                ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2020                num_irqs = 2;
2021        }
2022        if (IS_ERR_OR_NULL(ctx)) {
2023                rc = -ENOMEM;
2024                goto err1;
2025        }
2026
2027        WARN_ON(hwq->ctx_cookie);
2028        hwq->ctx_cookie = ctx;
2029        hwq->num_irqs = num_irqs;
2030
2031        /* Set it up as a master with the CXL */
2032        cfg->ops->set_master(ctx);
2033
2034        /* Reset AFU when initializing primary context */
2035        if (index == PRIMARY_HWQ) {
2036                rc = cfg->ops->afu_reset(ctx);
2037                if (unlikely(rc)) {
2038                        dev_err(dev, "%s: AFU reset failed rc=%d\n",
2039                                      __func__, rc);
2040                        goto err1;
2041                }
2042        }
2043
2044        level = init_intr(cfg, hwq);
2045        if (unlikely(level)) {
2046                dev_err(dev, "%s: interrupt init failed level=%d\n",
                            __func__, level);
                    rc = -EIO;      /* rc is still 0 here; don't report success */
2047                goto err2;
2048        }
2049
2050        /* Finally, activate the context by starting it */
2051        rc = cfg->ops->start_context(hwq->ctx_cookie);
2052        if (unlikely(rc)) {
2053                dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2054                level = UNMAP_THREE;
2055                goto err2;
2056        }
2057
2058out:
2059        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2060        return rc;
2061err2:
2062        term_intr(cfg, level, index);
2063        if (index != PRIMARY_HWQ)
2064                cfg->ops->release_context(ctx);
2065err1:
2066        hwq->ctx_cookie = NULL;
2067        goto out;
2068}
2069
2070/**
2071 * get_num_afu_ports() - determines and configures the number of AFU ports
2072 * @cfg:        Internal structure associated with the host.
2073 *
2074 * This routine determines the number of AFU ports by converting the global
2075 * port selection mask. The converted value is only valid following an AFU
2076 * reset (explicit or power-on). This routine must be invoked shortly after
2077 * mapping as other routines are dependent on the number of ports during the
2078 * initialization sequence.
2079 *
2080 * To support legacy AFUs that might not have reflected an initial global
2081 * port mask (value read is 0), default to the number of ports originally
2082 * supported by the cxlflash driver (2) before hardware with other port
2083 * offerings was introduced.
2084 */
2085static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2086{
2087        struct afu *afu = cfg->afu;
2088        struct device *dev = &cfg->dev->dev;
2089        u64 port_mask;
2090        int num_fc_ports = LEGACY_FC_PORTS;
2091
2092        port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2093        if (port_mask != 0ULL)
2094                num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
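            /*
             * For example, a 2-port AFU reflects a port mask of 0x3, giving
             * ilog2(0x3) + 1 = 2; a 4-port AFU reflects 0xf, giving 4.
             */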
2095
2096        dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2097                __func__, port_mask, num_fc_ports);
2098
2099        cfg->num_fc_ports = num_fc_ports;
2100        cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2101}
2102
2103/**
2104 * init_afu() - setup as master context and start AFU
2105 * @cfg:        Internal structure associated with the host.
2106 *
2107 * This routine is a higher level of control for configuring the
2108 * AFU on probe and reset paths.
2109 *
2110 * Return: 0 on success, -errno on failure
2111 */
2112static int init_afu(struct cxlflash_cfg *cfg)
2113{
2114        u64 reg;
2115        int rc = 0;
2116        struct afu *afu = cfg->afu;
2117        struct device *dev = &cfg->dev->dev;
2118        struct hwq *hwq;
2119        int i;
2120
2121        cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2122
2123        mutex_init(&afu->sync_active);
2124        afu->num_hwqs = afu->desired_hwqs;
2125        for (i = 0; i < afu->num_hwqs; i++) {
2126                rc = init_mc(cfg, i);
2127                if (rc) {
2128                        dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2129                                __func__, rc, i);
2130                        goto err1;
2131                }
2132        }
2133
2134        /* Map the entire MMIO space of the AFU using the first context */
2135        hwq = get_hwq(afu, PRIMARY_HWQ);
2136        afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2137        if (!afu->afu_map) {
2138                dev_err(dev, "%s: psa_map failed\n", __func__);
2139                rc = -ENOMEM;
2140                goto err1;
2141        }
2142
2143        /* No byte reverse on reading afu_version or the string will be backwards */
2144        reg = readq(&afu->afu_map->global.regs.afu_version);
2145        memcpy(afu->version, &reg, sizeof(reg));
2146        afu->interface_version =
2147            readq_be(&afu->afu_map->global.regs.interface_version);
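            /* An interface version of all FFs is treated as a back level AFU */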
2148        if ((afu->interface_version + 1) == 0) {
2149                dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2150                        "interface version %016llx\n", afu->version,
2151                        afu->interface_version);
2152                rc = -EINVAL;
2153                goto err1;
2154        }
2155
2156        if (afu_is_sq_cmd_mode(afu)) {
2157                afu->send_cmd = send_cmd_sq;
2158                afu->context_reset = context_reset_sq;
2159        } else {
2160                afu->send_cmd = send_cmd_ioarrin;
2161                afu->context_reset = context_reset_ioarrin;
2162        }
2163
2164        dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2165                afu->version, afu->interface_version);
2166
2167        get_num_afu_ports(cfg);
2168
2169        rc = start_afu(cfg);
2170        if (rc) {
2171                dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2172                goto err1;
2173        }
2174
2175        afu_err_intr_init(cfg->afu);
2176        for (i = 0; i < afu->num_hwqs; i++) {
2177                hwq = get_hwq(afu, i);
2178
2179                hwq->room = readq_be(&hwq->host_map->cmd_room);
2180        }
2181
2182        /* Restore the LUN mappings */
2183        cxlflash_restore_luntable(cfg);
2184out:
2185        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2186        return rc;
2187
2188err1:
2189        for (i = afu->num_hwqs - 1; i >= 0; i--) {
2190                term_intr(cfg, UNMAP_THREE, i);
2191                term_mc(cfg, i);
2192        }
2193        goto out;
2194}
2195
2196/**
2197 * afu_reset() - resets the AFU
2198 * @cfg:        Internal structure associated with the host.
2199 *
2200 * Return: 0 on success, -errno on failure
2201 */
2202static int afu_reset(struct cxlflash_cfg *cfg)
2203{
2204        struct device *dev = &cfg->dev->dev;
2205        int rc = 0;
2206
2207        /*
2208         * Stop the context before the reset. Since the context is no
             * longer available, restart it after the reset completes.
2209         */
2210        term_afu(cfg);
2211
2212        rc = init_afu(cfg);
2213
2214        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2215        return rc;
2216}
2217
2218/**
2219 * drain_ioctls() - wait until all currently executing ioctls have completed
2220 * @cfg:        Internal structure associated with the host.
2221 *
2222 * Obtain write access to read/write semaphore that wraps ioctl
2223 * handling to 'drain' ioctls currently executing.
2224 */
2225static void drain_ioctls(struct cxlflash_cfg *cfg)
2226{
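            /*
             * Each ioctl holds the read side of ioctl_rwsem while it runs,
             * so acquiring and releasing the write side cannot complete
             * until every in-flight ioctl has finished.
             */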
2227        down_write(&cfg->ioctl_rwsem);
2228        up_write(&cfg->ioctl_rwsem);
2229}
2230
2231/**
2232 * cxlflash_async_reset_host() - asynchronous host reset handler
2233 * @data:       Private data provided while scheduling reset.
2234 * @cookie:     Cookie that can be used for checkpointing.
2235 */
2236static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2237{
2238        struct cxlflash_cfg *cfg = data;
2239        struct device *dev = &cfg->dev->dev;
2240        int rc = 0;
2241
2242        if (cfg->state != STATE_RESET) {
2243                dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2244                        __func__, cfg->state);
2245                goto out;
2246        }
2247
2248        drain_ioctls(cfg);
2249        cxlflash_mark_contexts_error(cfg);
2250        rc = afu_reset(cfg);
2251        if (rc)
2252                cfg->state = STATE_FAILTERM;
2253        else
2254                cfg->state = STATE_NORMAL;
2255        wake_up_all(&cfg->reset_waitq);
2256
2257out:
2258        scsi_unblock_requests(cfg->host);
2259}
2260
2261/**
2262 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2263 * @cfg:        Internal structure associated with the host.
2264 */
2265static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2266{
2267        struct device *dev = &cfg->dev->dev;
2268
2269        if (cfg->state != STATE_NORMAL) {
2270                dev_dbg(dev, "%s: Not performing reset state=%d\n",
2271                        __func__, cfg->state);
2272                return;
2273        }
2274
2275        cfg->state = STATE_RESET;
2276        scsi_block_requests(cfg->host);
2277        cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2278                                                 cfg);
2279}
2280
2281/**
2282 * send_afu_cmd() - builds and sends an internal AFU command
2283 * @afu:        AFU associated with the host.
2284 * @rcb:        Pre-populated IOARCB describing command to send.
2285 *
2286 * The AFU can only take one internal AFU command at a time. This limitation is
2287 * enforced by using a mutex to provide exclusive access to the AFU during the
2288 * operation. This design point requires that calling threads not be in
2289 * interrupt context, as they may sleep while waiting for exclusive access.
2290 *
2291 * The command status is optionally passed back to the caller when the caller
2292 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2293 *
2294 * Return:
2295 *      0 on success, -errno on failure
2296 */
2297static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2298{
2299        struct cxlflash_cfg *cfg = afu->parent;
2300        struct device *dev = &cfg->dev->dev;
2301        struct afu_cmd *cmd = NULL;
2302        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2303        ulong lock_flags;
2304        char *buf = NULL;
2305        int rc = 0;
2306        int nretry = 0;
2307
2308        if (cfg->state != STATE_NORMAL) {
2309                dev_dbg(dev, "%s: Command not sent, state=%u\n",
2310                        __func__, cfg->state);
2311                return 0;
2312        }
2313
2314        mutex_lock(&afu->sync_active);
2315        atomic_inc(&afu->cmds_active);
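            /*
             * Over-allocate by __alignof__(*cmd) - 1 bytes so that the
             * PTR_ALIGN() below can carve out a properly aligned command
             * from within the buffer.
             */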
2316        buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2317        if (unlikely(!buf)) {
2318                dev_err(dev, "%s: no memory for command\n", __func__);
2319                rc = -ENOMEM;
2320                goto out;
2321        }
2322
2323        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2324
2325retry:
2326        memset(cmd, 0, sizeof(*cmd));
2327        memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2328        INIT_LIST_HEAD(&cmd->queue);
2329        init_completion(&cmd->cevent);
2330        cmd->parent = afu;
2331        cmd->hwq_index = hwq->index;
2332        cmd->rcb.ctx_id = hwq->ctx_hndl;
2333
2334        dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2335                __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2336
2337        rc = afu->send_cmd(afu, cmd);
2338        if (unlikely(rc)) {
2339                rc = -ENOBUFS;
2340                goto out;
2341        }
2342
2343        rc = wait_resp(afu, cmd);
2344        switch (rc) {
2345        case -ETIMEDOUT:
2346                rc = afu->context_reset(hwq);
2347                if (rc) {
2348                        /* Delete the command from pending_cmds list */
2349                        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2350                        list_del(&cmd->list);
2351                        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2352
2353                        cxlflash_schedule_async_reset(cfg);
2354                        break;
2355                }
2356                /* fall through to retry */
2357        case -EAGAIN:
2358                if (++nretry < 2)
2359                        goto retry;
2360                /* fall through to exit */
2361        default:
2362                break;
2363        }
2364
2365        if (rcb->ioasa)
2366                *rcb->ioasa = cmd->sa;
2367out:
2368        atomic_dec(&afu->cmds_active);
2369        mutex_unlock(&afu->sync_active);
2370        kfree(buf);
2371        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2372        return rc;
2373}
2374
2375/**
2376 * cxlflash_afu_sync() - builds and sends an AFU sync command
2377 * @afu:        AFU associated with the host.
2378 * @ctx:        Identifies context requesting sync.
2379 * @res:        Identifies resource requesting sync.
2380 * @mode:       Type of sync to issue (lightweight, heavyweight, global).
2381 *
2382 * AFU sync operations are only necessary and allowed when the device is
2383 * operating normally. When not operating normally, sync requests can occur as
2384 * part of cleaning up resources associated with an adapter prior to removal.
2385 * In this scenario, these requests are simply ignored (safe due to the AFU
2386 * going away).
2387 *
2388 * Return:
2389 *      0 on success, -errno on failure
2390 */
2391int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2392{
2393        struct cxlflash_cfg *cfg = afu->parent;
2394        struct device *dev = &cfg->dev->dev;
2395        struct sisl_ioarcb rcb = { 0 };
2396
2397        dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2398                __func__, afu, ctx, res, mode);
2399
2400        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2401        rcb.msi = SISL_MSI_RRQ_UPDATED;
2402        rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2403
2404        rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2405        rcb.cdb[1] = mode;
2406        put_unaligned_be16(ctx, &rcb.cdb[2]);
2407        put_unaligned_be32(res, &rcb.cdb[4]);
2408
2409        return send_afu_cmd(afu, &rcb);
2410}
2411
2412/**
2413 * cxlflash_eh_abort_handler() - abort a SCSI command
2414 * @scp:        SCSI command to abort.
2415 *
2416 * CXL Flash devices do not support a single command abort. Reset the context
2417 * as per SISLite specification. Flush any pending commands in the hardware
2418 * queue before the reset.
2419 *
2420 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2421 */
2422static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2423{
2424        int rc = FAILED;
2425        struct Scsi_Host *host = scp->device->host;
2426        struct cxlflash_cfg *cfg = shost_priv(host);
2427        struct afu_cmd *cmd = sc_to_afuc(scp);
2428        struct device *dev = &cfg->dev->dev;
2429        struct afu *afu = cfg->afu;
2430        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2431
2432        dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2433                "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2434                scp->device->channel, scp->device->id, scp->device->lun,
2435                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2436                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2437                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2438                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2439
2440        /* When the state is not normal, another reset/reload is in progress.
2441         * Return failed and the mid-layer will invoke host reset handler.
2442         */
2443        if (cfg->state != STATE_NORMAL) {
2444                dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2445                        __func__, cfg->state);
2446                goto out;
2447        }
2448
2449        rc = afu->context_reset(hwq);
2450        if (unlikely(rc))
2451                goto out;
2452
2453        rc = SUCCESS;
2454
2455out:
2456        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2457        return rc;
2458}
2459
2460/**
2461 * cxlflash_eh_device_reset_handler() - reset a single LUN
2462 * @scp:        SCSI command identifying the LUN to reset.
2463 *
2464 * Return:
2465 *      SUCCESS as defined in scsi/scsi.h
2466 *      FAILED as defined in scsi/scsi.h
2467 */
2468static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2469{
2470        int rc = SUCCESS;
2471        struct scsi_device *sdev = scp->device;
2472        struct Scsi_Host *host = sdev->host;
2473        struct cxlflash_cfg *cfg = shost_priv(host);
2474        struct device *dev = &cfg->dev->dev;
2475        int rcr = 0;
2476
2477        dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2478                host->host_no, sdev->channel, sdev->id, sdev->lun);
2479retry:
2480        switch (cfg->state) {
2481        case STATE_NORMAL:
2482                rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2483                if (unlikely(rcr))
2484                        rc = FAILED;
2485                break;
2486        case STATE_RESET:
2487                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2488                goto retry;
2489        default:
2490                rc = FAILED;
2491                break;
2492        }
2493
2494        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2495        return rc;
2496}
2497
2498/**
2499 * cxlflash_eh_host_reset_handler() - reset the host adapter
2500 * @scp:        SCSI command from stack identifying host.
2501 *
2502 * Following a reset, the state is evaluated again in case an EEH occurred
2503 * during the reset. In such a scenario, the host reset will either yield
2504 * until the EEH recovery is complete or return success or failure based
2505 * upon the current device state.
2506 *
2507 * Return:
2508 *      SUCCESS as defined in scsi/scsi.h
2509 *      FAILED as defined in scsi/scsi.h
2510 */
2511static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2512{
2513        int rc = SUCCESS;
2514        int rcr = 0;
2515        struct Scsi_Host *host = scp->device->host;
2516        struct cxlflash_cfg *cfg = shost_priv(host);
2517        struct device *dev = &cfg->dev->dev;
2518
2519        dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2520
2521        switch (cfg->state) {
2522        case STATE_NORMAL:
2523                cfg->state = STATE_RESET;
2524                drain_ioctls(cfg);
2525                cxlflash_mark_contexts_error(cfg);
2526                rcr = afu_reset(cfg);
2527                if (rcr) {
2528                        rc = FAILED;
2529                        cfg->state = STATE_FAILTERM;
2530                } else {
2531                        cfg->state = STATE_NORMAL;
                    }
2532                wake_up_all(&cfg->reset_waitq);
2533                ssleep(1);
2534                /* fall through */
2535        case STATE_RESET:
2536                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2537                if (cfg->state == STATE_NORMAL)
2538                        break;
2539                /* fall through */
2540        default:
2541                rc = FAILED;
2542                break;
2543        }
2544
2545        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2546        return rc;
2547}
2548
2549/**
2550 * cxlflash_change_queue_depth() - change the queue depth for the device
2551 * @sdev:       SCSI device destined for queue depth change.
2552 * @qdepth:     Requested queue depth value to set.
2553 *
2554 * The requested queue depth is capped to the maximum supported value.
2555 *
2556 * Return: The actual queue depth set.
2557 */
2558static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2559{
2561        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2562                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2563
2564        scsi_change_queue_depth(sdev, qdepth);
2565        return sdev->queue_depth;
2566}
2567
2568/**
2569 * cxlflash_show_port_status() - queries and presents the current port status
2570 * @port:       Desired port for status reporting.
2571 * @cfg:        Internal structure associated with the host.
2572 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2573 *
2574 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2575 */
2576static ssize_t cxlflash_show_port_status(u32 port,
2577                                         struct cxlflash_cfg *cfg,
2578                                         char *buf)
2579{
2580        struct device *dev = &cfg->dev->dev;
2581        char *disp_status;
2582        u64 status;
2583        __be64 __iomem *fc_port_regs;
2584
2585        WARN_ON(port >= MAX_FC_PORTS);
2586
2587        if (port >= cfg->num_fc_ports) {
2588                dev_info(dev, "%s: Port %d not supported on this card.\n",
2589                        __func__, port);
2590                return -EINVAL;
2591        }
2592
2593        fc_port_regs = get_fc_port_regs(cfg, port);
2594        status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2595        status &= FC_MTIP_STATUS_MASK;
2596
2597        if (status == FC_MTIP_STATUS_ONLINE)
2598                disp_status = "online";
2599        else if (status == FC_MTIP_STATUS_OFFLINE)
2600                disp_status = "offline";
2601        else
2602                disp_status = "unknown";
2603
2604        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2605}
2606
2607/**
2608 * port0_show() - queries and presents the current status of port 0
2609 * @dev:        Generic device associated with the host owning the port.
2610 * @attr:       Device attribute representing the port.
2611 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2612 *
2613 * Return: The size of the ASCII string returned in @buf.
2614 */
2615static ssize_t port0_show(struct device *dev,
2616                          struct device_attribute *attr,
2617                          char *buf)
2618{
2619        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2620
2621        return cxlflash_show_port_status(0, cfg, buf);
2622}
2623
2624/**
2625 * port1_show() - queries and presents the current status of port 1
2626 * @dev:        Generic device associated with the host owning the port.
2627 * @attr:       Device attribute representing the port.
2628 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2629 *
2630 * Return: The size of the ASCII string returned in @buf.
2631 */
2632static ssize_t port1_show(struct device *dev,
2633                          struct device_attribute *attr,
2634                          char *buf)
2635{
2636        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2637
2638        return cxlflash_show_port_status(1, cfg, buf);
2639}
2640
2641/**
2642 * port2_show() - queries and presents the current status of port 2
2643 * @dev:        Generic device associated with the host owning the port.
2644 * @attr:       Device attribute representing the port.
2645 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2646 *
2647 * Return: The size of the ASCII string returned in @buf.
2648 */
2649static ssize_t port2_show(struct device *dev,
2650                          struct device_attribute *attr,
2651                          char *buf)
2652{
2653        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2654
2655        return cxlflash_show_port_status(2, cfg, buf);
2656}
2657
2658/**
2659 * port3_show() - queries and presents the current status of port 3
2660 * @dev:        Generic device associated with the host owning the port.
2661 * @attr:       Device attribute representing the port.
2662 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2663 *
2664 * Return: The size of the ASCII string returned in @buf.
2665 */
2666static ssize_t port3_show(struct device *dev,
2667                          struct device_attribute *attr,
2668                          char *buf)
2669{
2670        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2671
2672        return cxlflash_show_port_status(3, cfg, buf);
2673}
2674
2675/**
2676 * lun_mode_show() - presents the current LUN mode of the host
2677 * @dev:        Generic device associated with the host.
2678 * @attr:       Device attribute representing the LUN mode.
2679 * @buf:        Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2680 *
2681 * Return: The size of the ASCII string returned in @buf.
2682 */
2683static ssize_t lun_mode_show(struct device *dev,
2684                             struct device_attribute *attr, char *buf)
2685{
2686        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2687        struct afu *afu = cfg->afu;
2688
2689        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2690}
2691
2692/**
2693 * lun_mode_store() - sets the LUN mode of the host
2694 * @dev:        Generic device associated with the host.
2695 * @attr:       Device attribute representing the LUN mode.
2696 * @buf:        Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2697 * @count:      Length of data residing in @buf.
2698 *
2699 * The CXL Flash AFU supports a dummy LUN mode where the external
2700 * links and storage are not required. Space on the FPGA is used
2701 * to create 1 or 2 small LUNs which are presented to the system
2702 * as if they were a normal storage device. This feature is useful
2703 * during development and also provides manufacturing with a way
2704 * to test the AFU without an actual device.
2705 *
2706 * 0 = external LUN[s] (default)
2707 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2708 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2709 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2710 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2711 *
2712 * Return: The number of bytes consumed from @buf (always @count).
2713 */
2714static ssize_t lun_mode_store(struct device *dev,
2715                              struct device_attribute *attr,
2716                              const char *buf, size_t count)
2717{
2718        struct Scsi_Host *shost = class_to_shost(dev);
2719        struct cxlflash_cfg *cfg = shost_priv(shost);
2720        struct afu *afu = cfg->afu;
2721        int rc;
2722        u32 lun_mode;
2723
2724        rc = kstrtouint(buf, 10, &lun_mode);
2725        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2726                afu->internal_lun = lun_mode;
2727
2728                /*
2729                 * When configured for internal LUN, there is only one channel,
2730                 * channel number 0; otherwise, the highest channel number is
2731                 * one less than the number of FC ports on the card.
2732                 */
2733                if (afu->internal_lun)
2734                        shost->max_channel = 0;
2735                else
2736                        shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2737
2738                afu_reset(cfg);
2739                scsi_scan_host(cfg->host);
2740        }
2741
2742        return count;
2743}
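    /*
     * Example usage, assuming the attribute is exposed through the standard
     * SCSI host sysfs location (the path is an assumption, not taken from
     * this file):
     *
     *   echo 1 > /sys/class/scsi_host/host<N>/lun_mode
     *
     * selects a single internal 64K LUN with 512B blocks and triggers an
     * AFU reset followed by a rescan of the host.
     */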
2744
2745/**
2746 * ioctl_version_show() - presents the current ioctl version of the host
2747 * @dev:        Generic device associated with the host.
2748 * @attr:       Device attribute representing the ioctl version.
2749 * @buf:        Buffer of length PAGE_SIZE to report back the ioctl version.
2750 *
2751 * Return: The size of the ASCII string returned in @buf.
2752 */
2753static ssize_t ioctl_version_show(struct device *dev,
2754                                  struct device_attribute *attr, char *buf)
2755{
2756        ssize_t bytes = 0;
2757
2758        bytes = scnprintf(buf, PAGE_SIZE,
2759                          "disk: %u\n", DK_CXLFLASH_VERSION_0);
2760        bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2761                           "host: %u\n", HT_CXLFLASH_VERSION_0);
2762
2763        return bytes;
2764}
2765
2766/**
2767 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2768 * @port:       Desired port for status reporting.
2769 * @cfg:        Internal structure associated with the host.
2770 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2771 *
2772 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2773 */
2774static ssize_t cxlflash_show_port_lun_table(u32 port,
2775                                            struct cxlflash_cfg *cfg,
2776                                            char *buf)
2777{
2778        struct device *dev = &cfg->dev->dev;
2779        __be64 __iomem *fc_port_luns;
2780        int i;
2781        ssize_t bytes = 0;
2782
2783        WARN_ON(port >= MAX_FC_PORTS);
2784
2785        if (port >= cfg->num_fc_ports) {
2786                dev_info(dev, "%s: Port %d not supported on this card.\n",
2787                        __func__, port);
2788                return -EINVAL;
2789        }
2790
2791        fc_port_luns = get_fc_port_luns(cfg, port);
2792
2793        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2794                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2795                                   "%03d: %016llx\n",
2796                                   i, readq_be(&fc_port_luns[i]));
2797        return bytes;
2798}
2799
2800/**
2801 * port0_lun_table_show() - presents the current LUN table of port 0
2802 * @dev:        Generic device associated with the host owning the port.
2803 * @attr:       Device attribute representing the port.
2804 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2805 *
2806 * Return: The size of the ASCII string returned in @buf.
2807 */
2808static ssize_t port0_lun_table_show(struct device *dev,
2809                                    struct device_attribute *attr,
2810                                    char *buf)
2811{
2812        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2813
2814        return cxlflash_show_port_lun_table(0, cfg, buf);
2815}
2816
2817/**
2818 * port1_lun_table_show() - presents the current LUN table of port 1
2819 * @dev:        Generic device associated with the host owning the port.
2820 * @attr:       Device attribute representing the port.
2821 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2822 *
2823 * Return: The size of the ASCII string returned in @buf.
2824 */
2825static ssize_t port1_lun_table_show(struct device *dev,
2826                                    struct device_attribute *attr,
2827                                    char *buf)
2828{
2829        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2830
2831        return cxlflash_show_port_lun_table(1, cfg, buf);
2832}
2833
2834/**
2835 * port2_lun_table_show() - presents the current LUN table of port 2
2836 * @dev:        Generic device associated with the host owning the port.
2837 * @attr:       Device attribute representing the port.
2838 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2839 *
2840 * Return: The size of the ASCII string returned in @buf.
2841 */
2842static ssize_t port2_lun_table_show(struct device *dev,
2843                                    struct device_attribute *attr,
2844                                    char *buf)
2845{
2846        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2847
2848        return cxlflash_show_port_lun_table(2, cfg, buf);
2849}
2850
2851/**
2852 * port3_lun_table_show() - presents the current LUN table of port 3
2853 * @dev:        Generic device associated with the host owning the port.
2854 * @attr:       Device attribute representing the port.
2855 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2856 *
2857 * Return: The size of the ASCII string returned in @buf.
2858 */
2859static ssize_t port3_lun_table_show(struct device *dev,
2860                                    struct device_attribute *attr,
2861                                    char *buf)
2862{
2863        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2864
2865        return cxlflash_show_port_lun_table(3, cfg, buf);
2866}
2867
2868/**
2869 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2870 * @dev:        Generic device associated with the host.
2871 * @attr:       Device attribute representing the IRQ poll weight.
2872 * @buf:        Buffer of length PAGE_SIZE to report back the current IRQ poll
2873 *              weight in ASCII.
2874 *
2875 * An IRQ poll weight of 0 indicates polling is disabled.
2876 *
2877 * Return: The size of the ASCII string returned in @buf.
2878 */
2879static ssize_t irqpoll_weight_show(struct device *dev,
2880                                   struct device_attribute *attr, char *buf)
2881{
2882        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2883        struct afu *afu = cfg->afu;
2884
2885        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2886}
2887
2888/**
2889 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2890 * @dev:        Generic device associated with the host.
2891 * @attr:       Device attribute representing the IRQ poll weight.
2892 * @buf:        Buffer of length PAGE_SIZE containing the desired IRQ poll
2893 *              weight in ASCII.
2894 * @count:      Length of data residing in @buf.
2895 *
2896 * An IRQ poll weight of 0 indicates polling is disabled.
2897 *
2898 * Return: @count on success, -errno on failure.
2899 */
2900static ssize_t irqpoll_weight_store(struct device *dev,
2901                                    struct device_attribute *attr,
2902                                    const char *buf, size_t count)
2903{
2904        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2905        struct device *cfgdev = &cfg->dev->dev;
2906        struct afu *afu = cfg->afu;
2907        struct hwq *hwq;
2908        u32 weight;
2909        int rc, i;
2910
2911        rc = kstrtouint(buf, 10, &weight);
2912        if (rc)
2913                return -EINVAL;
2914
2915        if (weight > 256) {
2916                dev_info(cfgdev,
2917                         "Invalid IRQ poll weight. It must be 256 or less.\n");
2918                return -EINVAL;
2919        }
2920
2921        if (weight == afu->irqpoll_weight) {
2922                dev_info(cfgdev,
2923                         "Specified IRQ poll weight is the same as the current value.\n");
2924                return -EINVAL;
2925        }
2926
2927        if (afu_is_irqpoll_enabled(afu)) {
2928                for (i = 0; i < afu->num_hwqs; i++) {
2929                        hwq = get_hwq(afu, i);
2930
2931                        irq_poll_disable(&hwq->irqpoll);
2932                }
2933        }
2934
2935        afu->irqpoll_weight = weight;
2936
2937        if (weight > 0) {
2938                for (i = 0; i < afu->num_hwqs; i++) {
2939                        hwq = get_hwq(afu, i);
2940
2941                        irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2942                }
2943        }
2944
2945        return count;
2946}
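/*
 * Example usage through sysfs (a sketch; "host0" is a placeholder, the
 * actual SCSI host number is system dependent):
 *
 *	echo 0 > /sys/class/scsi_host/host0/irqpoll_weight	# disable polling
 *	echo 64 > /sys/class/scsi_host/host0/irqpoll_weight	# weight of 64
 */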
2947
2948/**
2949 * num_hwqs_show() - presents the number of hardware queues for the host
2950 * @dev:        Generic device associated with the host.
2951 * @attr:       Device attribute representing the number of hardware queues.
2952 * @buf:        Buffer of length PAGE_SIZE to report back the number of hardware
2953 *              queues in ASCII.
2954 *
2955 * Return: The size of the ASCII string returned in @buf.
2956 */
2957static ssize_t num_hwqs_show(struct device *dev,
2958                             struct device_attribute *attr, char *buf)
2959{
2960        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2961        struct afu *afu = cfg->afu;
2962
2963        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2964}
2965
2966/**
2967 * num_hwqs_store() - sets the number of hardware queues for the host
2968 * @dev:        Generic device associated with the host.
2969 * @attr:       Device attribute representing the number of hardware queues.
2970 * @buf:        Buffer of length PAGE_SIZE containing the number of hardware
2971 *              queues in ASCII.
2972 * @count:      Length of data residing in @buf.
2973 *
2974 * n > 0: num_hwqs = n
2975 * n = 0: num_hwqs = num_online_cpus()
2976 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2977 *
2978 * Return: @count on success, -errno on failure.
2979 */
2980static ssize_t num_hwqs_store(struct device *dev,
2981                              struct device_attribute *attr,
2982                              const char *buf, size_t count)
2983{
2984        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2985        struct afu *afu = cfg->afu;
2986        int rc;
2987        int nhwqs, num_hwqs;
2988
2989        rc = kstrtoint(buf, 10, &nhwqs);
2990        if (rc)
2991                return -EINVAL;
2992
2993        if (nhwqs >= 1)
2994                num_hwqs = nhwqs;
2995        else if (nhwqs == 0)
2996                num_hwqs = num_online_cpus();
2997        else
2998                num_hwqs = num_online_cpus() / abs(nhwqs);
2999
3000        afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3001        WARN_ON_ONCE(afu->desired_hwqs == 0);
3002
3003retry:
3004        switch (cfg->state) {
3005        case STATE_NORMAL:
3006                cfg->state = STATE_RESET;
3007                drain_ioctls(cfg);
3008                cxlflash_mark_contexts_error(cfg);
3009                rc = afu_reset(cfg);
3010                if (rc)
3011                        cfg->state = STATE_FAILTERM;
3012                else
3013                        cfg->state = STATE_NORMAL;
3014                wake_up_all(&cfg->reset_waitq);
3015                break;
3016        case STATE_RESET:
3017                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3018                if (cfg->state == STATE_NORMAL)
3019                        goto retry;
                    /* else, fall through */
3020        default:
3021                /* Ideally should not happen */
3022                dev_err(dev, "%s: Device is not ready, state=%d\n",
3023                        __func__, cfg->state);
3024                break;
3025        }
3026
3027        return count;
3028}
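/*
 * Example usage through sysfs (a sketch; "host0" is a placeholder):
 *
 *	echo 4 > /sys/class/scsi_host/host0/num_hwqs	# 4 hardware queues
 *	echo 0 > /sys/class/scsi_host/host0/num_hwqs	# one per online CPU
 *	echo -2 > /sys/class/scsi_host/host0/num_hwqs	# num_online_cpus() / 2
 */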
3029
3030static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3031
3032/**
3033 * hwq_mode_show() - presents the HWQ steering mode for the host
3034 * @dev:        Generic device associated with the host.
3035 * @attr:       Device attribute representing the HWQ steering mode.
3036 * @buf:        Buffer of length PAGE_SIZE to report back the HWQ steering mode
3037 *              as a character string.
3038 *
3039 * Return: The size of the ASCII string returned in @buf.
3040 */
3041static ssize_t hwq_mode_show(struct device *dev,
3042                             struct device_attribute *attr, char *buf)
3043{
3044        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3045        struct afu *afu = cfg->afu;
3046
3047        return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3048}
3049
3050/**
3051 * hwq_mode_store() - sets the HWQ steering mode for the host
3052 * @dev:        Generic device associated with the host.
3053 * @attr:       Device attribute representing the HWQ steering mode.
3054 * @buf:        Buffer of length PAGE_SIZE containing the HWQ steering mode
3055 *              as a character string.
3056 * @count:      Length of data residing in @buf.
3057 *
3058 * rr = Round-Robin
3059 * tag = Block MQ Tagging
3060 * cpu = CPU Affinity
3061 *
3062 * Return: @count on success, -errno on failure.
3063 */
3064static ssize_t hwq_mode_store(struct device *dev,
3065                              struct device_attribute *attr,
3066                              const char *buf, size_t count)
3067{
3068        struct Scsi_Host *shost = class_to_shost(dev);
3069        struct cxlflash_cfg *cfg = shost_priv(shost);
3070        struct device *cfgdev = &cfg->dev->dev;
3071        struct afu *afu = cfg->afu;
3072        int i;
3073        u32 mode = MAX_HWQ_MODE;
3074
3075        for (i = 0; i < MAX_HWQ_MODE; i++) {
3076                if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3077                        mode = i;
3078                        break;
3079                }
3080        }
3081
3082        if (mode >= MAX_HWQ_MODE) {
3083                dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3084                return -EINVAL;
3085        }
3086
3087        afu->hwq_mode = mode;
3088
3089        return count;
3090}
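/*
 * Example usage through sysfs (a sketch; "host0" is a placeholder):
 *
 *	cat /sys/class/scsi_host/host0/hwq_mode		# e.g. "rr"
 *	echo cpu > /sys/class/scsi_host/host0/hwq_mode	# steer by CPU affinity
 */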
3091
3092/**
3093 * mode_show() - presents the current mode of the device
3094 * @dev:        Generic device associated with the device.
3095 * @attr:       Device attribute representing the device mode.
3096 * @buf:        Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3097 *
3098 * Return: The size of the ASCII string returned in @buf.
3099 */
3100static ssize_t mode_show(struct device *dev,
3101                         struct device_attribute *attr, char *buf)
3102{
3103        struct scsi_device *sdev = to_scsi_device(dev);
3104
3105        return scnprintf(buf, PAGE_SIZE, "%s\n",
3106                         sdev->hostdata ? "superpipe" : "legacy");
3107}
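/*
 * Example usage through sysfs (a sketch; the SCSI address is a placeholder):
 *
 *	cat /sys/class/scsi_device/1:0:0:0/device/mode
 *	superpipe
 */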
3108
3109/*
3110 * Host attributes
3111 */
3112static DEVICE_ATTR_RO(port0);
3113static DEVICE_ATTR_RO(port1);
3114static DEVICE_ATTR_RO(port2);
3115static DEVICE_ATTR_RO(port3);
3116static DEVICE_ATTR_RW(lun_mode);
3117static DEVICE_ATTR_RO(ioctl_version);
3118static DEVICE_ATTR_RO(port0_lun_table);
3119static DEVICE_ATTR_RO(port1_lun_table);
3120static DEVICE_ATTR_RO(port2_lun_table);
3121static DEVICE_ATTR_RO(port3_lun_table);
3122static DEVICE_ATTR_RW(irqpoll_weight);
3123static DEVICE_ATTR_RW(num_hwqs);
3124static DEVICE_ATTR_RW(hwq_mode);
3125
3126static struct device_attribute *cxlflash_host_attrs[] = {
3127        &dev_attr_port0,
3128        &dev_attr_port1,
3129        &dev_attr_port2,
3130        &dev_attr_port3,
3131        &dev_attr_lun_mode,
3132        &dev_attr_ioctl_version,
3133        &dev_attr_port0_lun_table,
3134        &dev_attr_port1_lun_table,
3135        &dev_attr_port2_lun_table,
3136        &dev_attr_port3_lun_table,
3137        &dev_attr_irqpoll_weight,
3138        &dev_attr_num_hwqs,
3139        &dev_attr_hwq_mode,
3140        NULL
3141};
3142
3143/*
3144 * Device attributes
3145 */
3146static DEVICE_ATTR_RO(mode);
3147
3148static struct device_attribute *cxlflash_dev_attrs[] = {
3149        &dev_attr_mode,
3150        NULL
3151};
3152
3153/*
3154 * Host template
3155 */
3156static struct scsi_host_template driver_template = {
3157        .module = THIS_MODULE,
3158        .name = CXLFLASH_ADAPTER_NAME,
3159        .info = cxlflash_driver_info,
3160        .ioctl = cxlflash_ioctl,
3161        .proc_name = CXLFLASH_NAME,
3162        .queuecommand = cxlflash_queuecommand,
3163        .eh_abort_handler = cxlflash_eh_abort_handler,
3164        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3165        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3166        .change_queue_depth = cxlflash_change_queue_depth,
3167        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3168        .can_queue = CXLFLASH_MAX_CMDS,
3169        .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3170        .this_id = -1,
3171        .sg_tablesize = 1,      /* No scatter gather support */
3172        .max_sectors = CXLFLASH_MAX_SECTORS,
3173        .shost_attrs = cxlflash_host_attrs,
3174        .sdev_attrs = cxlflash_dev_attrs,
3175};
3176
3177/*
3178 * Device dependent values
3179 */
3180static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3181                                        CXLFLASH_WWPN_VPD_REQUIRED };
3182static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3183                                        CXLFLASH_NOTIFY_SHUTDOWN };
3184static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3185                                        (CXLFLASH_NOTIFY_SHUTDOWN |
3186                                        CXLFLASH_OCXL_DEV) };
3187
3188/*
3189 * PCI device binding table
3190 */
3191static struct pci_device_id cxlflash_pci_table[] = {
3192        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3193         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3194        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3195         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3196        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3197         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3198        {}
3199};
3200
3201MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3202
3203/**
3204 * cxlflash_worker_thread() - work thread handler for the AFU
3205 * @work:       Work structure contained within cxlflash associated with host.
3206 *
3207 * Handles the following events:
3208 * - Link reset, which cannot be performed in interrupt context as it may
3209 *   block for up to a few seconds
3210 * - Rescan the host
3211 */
3212static void cxlflash_worker_thread(struct work_struct *work)
3213{
3214        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3215                                                work_q);
3216        struct afu *afu = cfg->afu;
3217        struct device *dev = &cfg->dev->dev;
3218        __be64 __iomem *fc_port_regs;
3219        int port;
3220        ulong lock_flags;
3221
3222        /* Avoid MMIO if the device has failed */
3223
3224        if (cfg->state != STATE_NORMAL)
3225                return;
3226
3227        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3228
3229        if (cfg->lr_state == LINK_RESET_REQUIRED) {
3230                port = cfg->lr_port;
3231                if (port < 0)
3232                        dev_err(dev, "%s: invalid port index %d\n",
3233                                __func__, port);
3234                else {
3235                        spin_unlock_irqrestore(cfg->host->host_lock,
3236                                               lock_flags);
3237
3238                        /* The reset can block... */
3239                        fc_port_regs = get_fc_port_regs(cfg, port);
3240                        afu_link_reset(afu, port, fc_port_regs);
3241                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3242                }
3243
3244                cfg->lr_state = LINK_RESET_COMPLETE;
3245        }
3246
3247        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3248
3249        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3250                scsi_scan_host(cfg->host);
3251}
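/*
 * Producers elsewhere in this driver queue work for the handler above
 * along these lines (a sketch, not a verbatim excerpt):
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;	// under the host lock
 *	cfg->lr_port = port;
 *	schedule_work(&cfg->work_q);
 *
 * or, to request a rescan of the host:
 *
 *	atomic_inc(&cfg->scan_host_needed);
 *	schedule_work(&cfg->work_q);
 */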
3252
3253/**
3254 * cxlflash_chr_open() - character device open handler
3255 * @inode:      Device inode associated with this character device.
3256 * @file:       File pointer for this device.
3257 *
3258 * Only users with admin privileges are allowed to open the character device.
3259 *
3260 * Return: 0 on success, -errno on failure
3261 */
3262static int cxlflash_chr_open(struct inode *inode, struct file *file)
3263{
3264        struct cxlflash_cfg *cfg;
3265
3266        if (!capable(CAP_SYS_ADMIN))
3267                return -EACCES;
3268
3269        cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3270        file->private_data = cfg;
3271
3272        return 0;
3273}
3274
3275/**
3276 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3277 * @cmd:        The host ioctl command to decode.
3278 *
3279 * Return: A string identifying the decoded host ioctl.
3280 */
3281static char *decode_hioctl(unsigned int cmd)
3282{
3283        switch (cmd) {
3284        case HT_CXLFLASH_LUN_PROVISION:
3285                return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3286        }
3287
3288        return "UNKNOWN";
3289}
3290
3291/**
3292 * cxlflash_lun_provision() - host LUN provisioning handler
3293 * @cfg:        Internal structure associated with the host.
3294 * @lunprov:    Kernel copy of userspace ioctl data structure.
3295 *
3296 * Return: 0 on success, -errno on failure
3297 */
3298static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3299                                  struct ht_cxlflash_lun_provision *lunprov)
3300{
3301        struct afu *afu = cfg->afu;
3302        struct device *dev = &cfg->dev->dev;
3303        struct sisl_ioarcb rcb;
3304        struct sisl_ioasa asa;
3305        __be64 __iomem *fc_port_regs;
3306        u16 port = lunprov->port;
3307        u16 scmd = lunprov->hdr.subcmd;
3308        u16 type;
3309        u64 reg;
3310        u64 size;
3311        u64 lun_id;
3312        int rc = 0;
3313
3314        if (!afu_is_lun_provision(afu)) {
3315                rc = -ENOTSUPP;
3316                goto out;
3317        }
3318
3319        if (port >= cfg->num_fc_ports) {
3320                rc = -EINVAL;
3321                goto out;
3322        }
3323
3324        switch (scmd) {
3325        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3326                type = SISL_AFU_LUN_PROVISION_CREATE;
3327                size = lunprov->size;
3328                lun_id = 0;
3329                break;
3330        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3331                type = SISL_AFU_LUN_PROVISION_DELETE;
3332                size = 0;
3333                lun_id = lunprov->lun_id;
3334                break;
3335        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3336                fc_port_regs = get_fc_port_regs(cfg, port);
3337
3338                reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3339                lunprov->max_num_luns = reg;
3340                reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3341                lunprov->cur_num_luns = reg;
3342                reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3343                lunprov->max_cap_port = reg;
3344                reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3345                lunprov->cur_cap_port = reg;
3346
3347                goto out;
3348        default:
3349                rc = -EINVAL;
3350                goto out;
3351        }
3352
3353        memset(&rcb, 0, sizeof(rcb));
3354        memset(&asa, 0, sizeof(asa));
3355        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3356        rcb.lun_id = lun_id;
3357        rcb.msi = SISL_MSI_RRQ_UPDATED;
3358        rcb.timeout = MC_LUN_PROV_TIMEOUT;
3359        rcb.ioasa = &asa;
3360
3361        rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3362        rcb.cdb[1] = type;
3363        rcb.cdb[2] = port;
3364        put_unaligned_be64(size, &rcb.cdb[8]);
3365
3366        rc = send_afu_cmd(afu, &rcb);
3367        if (rc) {
3368                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3369                        __func__, rc, asa.ioasc, asa.afu_extra);
3370                goto out;
3371        }
3372
3373        if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3374                lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3375                memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3376        }
3377out:
3378        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3379        return rc;
3380}
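/*
 * Userspace sketch of driving the LUN provision ioctl (illustrative only;
 * error handling is elided, the device node name is a placeholder, and
 * opening the character device requires CAP_SYS_ADMIN):
 *
 *	struct ht_cxlflash_lun_provision lp = { 0 };
 *	int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *
 *	lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
 *	lp.port = 0;
 *	lp.size = lun_size;	// units per the uapi header
 *
 *	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp) == 0)
 *		// lp.lun_id and lp.wwid now describe the new LUN
 */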
3381
3382/**
3383 * cxlflash_afu_debug() - host AFU debug handler
3384 * @cfg:        Internal structure associated with the host.
3385 * @afu_dbg:    Kernel copy of userspace ioctl data structure.
3386 *
3387 * For debug requests requiring a data buffer, always provide an aligned
3388 * (cache line) buffer to the AFU to appease any alignment requirements.
3389 *
3390 * Return: 0 on success, -errno on failure
3391 */
3392static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3393                              struct ht_cxlflash_afu_debug *afu_dbg)
3394{
3395        struct afu *afu = cfg->afu;
3396        struct device *dev = &cfg->dev->dev;
3397        struct sisl_ioarcb rcb;
3398        struct sisl_ioasa asa;
3399        char *buf = NULL;
3400        char *kbuf = NULL;
3401        void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3402        u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3403        u32 ulen = afu_dbg->data_len;
3404        bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3405        int rc = 0;
3406
3407        if (!afu_is_afu_debug(afu)) {
3408                rc = -ENOTSUPP;
3409                goto out;
3410        }
3411
3412        if (ulen) {
3413                req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3414
3415                if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3416                        rc = -EINVAL;
3417                        goto out;
3418                }
3419
3420                buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3421                if (unlikely(!buf)) {
3422                        rc = -ENOMEM;
3423                        goto out;
3424                }
3425
3426                kbuf = PTR_ALIGN(buf, cache_line_size());
3427
3428                if (is_write) {
3429                        req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3430
3431                        if (copy_from_user(kbuf, ubuf, ulen)) {
3432                                rc = -EFAULT;
3433                                goto out;
3434                        }
3435                }
3436        }
3437
3438        memset(&rcb, 0, sizeof(rcb));
3439        memset(&asa, 0, sizeof(asa));
3440
3441        rcb.req_flags = req_flags;
3442        rcb.msi = SISL_MSI_RRQ_UPDATED;
3443        rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3444        rcb.ioasa = &asa;
3445
3446        if (ulen) {
3447                rcb.data_len = ulen;
3448                rcb.data_ea = (uintptr_t)kbuf;
3449        }
3450
3451        rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3452        memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3453               HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3454
3455        rc = send_afu_cmd(afu, &rcb);
3456        if (rc) {
3457                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3458                        __func__, rc, asa.ioasc, asa.afu_extra);
3459                goto out;
3460        }
3461
3462        if (ulen && !is_write) {
3463                if (copy_to_user(ubuf, kbuf, ulen))
3464                        rc = -EFAULT;
3465        }
3466out:
3467        kfree(buf);
3468        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3469        return rc;
3470}
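/*
 * The over-allocate-and-align pattern used above, in isolation (a sketch):
 *
 *	buf = kmalloc(len + cache_line_size() - 1, GFP_KERNEL);
 *	kbuf = PTR_ALIGN(buf, cache_line_size());
 *
 * kmalloc() does not guarantee cache-line alignment for every size and
 * configuration, so the extra cache_line_size() - 1 bytes ensure that an
 * aligned pointer exists within the allocation. Note that buf, not kbuf,
 * is what must eventually be passed to kfree().
 */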
3471
3472/**
3473 * cxlflash_chr_ioctl() - character device IOCTL handler
3474 * @file:       File pointer for this device.
3475 * @cmd:        IOCTL command.
3476 * @arg:        Userspace ioctl data structure.
3477 *
3478 * A read/write semaphore is used to implement a 'drain' of currently
3479 * running ioctls. The read semaphore is taken at the beginning of each
3480 * ioctl thread and released upon concluding execution. Additionally the
3481 * semaphore should be released and then reacquired in any ioctl execution
3482 * path which will wait for an event to occur that is outside the scope of
3483 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3484 * a thread simply needs to acquire the write semaphore.
3485 *
3486 * Return: 0 on success, -errno on failure
3487 */
3488static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3489                               unsigned long arg)
3490{
3491        typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3492
3493        struct cxlflash_cfg *cfg = file->private_data;
3494        struct device *dev = &cfg->dev->dev;
3495        char buf[sizeof(union cxlflash_ht_ioctls)];
3496        void __user *uarg = (void __user *)arg;
3497        struct ht_cxlflash_hdr *hdr;
3498        size_t size = 0;
3499        bool known_ioctl = false;
3500        int idx = 0;
3501        int rc = 0;
3502        hioctl do_ioctl = NULL;
3503
3504        static const struct {
3505                size_t size;
3506                hioctl ioctl;
3507        } ioctl_tbl[] = {       /* NOTE: order matters here */
3508        { sizeof(struct ht_cxlflash_lun_provision),
3509                (hioctl)cxlflash_lun_provision },
3510        { sizeof(struct ht_cxlflash_afu_debug),
3511                (hioctl)cxlflash_afu_debug },
3512        };
3513
3514        /* Hold read semaphore so we can drain if needed */
3515        down_read(&cfg->ioctl_rwsem);
3516
3517        dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3518                __func__, cmd, idx, sizeof(ioctl_tbl));
3519
3520        switch (cmd) {
3521        case HT_CXLFLASH_LUN_PROVISION:
3522        case HT_CXLFLASH_AFU_DEBUG:
3523                known_ioctl = true;
3524                idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3525                size = ioctl_tbl[idx].size;
3526                do_ioctl = ioctl_tbl[idx].ioctl;
3527
3528                if (likely(do_ioctl))
3529                        break;
3530
3531                /* fall through */
3532        default:
3533                rc = -EINVAL;
3534                goto out;
3535        }
3536
3537        if (unlikely(copy_from_user(&buf, uarg, size))) {
3538                dev_err(dev, "%s: copy_from_user() fail "
3539                        "size=%lu cmd=%d (%s) uarg=%p\n",
3540                        __func__, size, cmd, decode_hioctl(cmd), uarg);
3541                rc = -EFAULT;
3542                goto out;
3543        }
3544
3545        hdr = (struct ht_cxlflash_hdr *)&buf;
3546        if (hdr->version != HT_CXLFLASH_VERSION_0) {
3547                dev_dbg(dev, "%s: Version %u not supported for %s\n",
3548                        __func__, hdr->version, decode_hioctl(cmd));
3549                rc = -EINVAL;
3550                goto out;
3551        }
3552
3553        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3554                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3555                rc = -EINVAL;
3556                goto out;
3557        }
3558
3559        rc = do_ioctl(cfg, (void *)&buf);
3560        if (likely(!rc))
3561                if (unlikely(copy_to_user(uarg, &buf, size))) {
3562                        dev_err(dev, "%s: copy_to_user() fail "
3563                                "size=%lu cmd=%d (%s) uarg=%p\n",
3564                                __func__, size, cmd, decode_hioctl(cmd), uarg);
3565                        rc = -EFAULT;
3566                }
3567
3568        /* fall through to exit */
3569
3570out:
3571        up_read(&cfg->ioctl_rwsem);
3572        if (unlikely(rc && known_ioctl))
3573                dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3574                        __func__, decode_hioctl(cmd), cmd, rc);
3575        else
3576                dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3577                        __func__, decode_hioctl(cmd), cmd, rc);
3578        return rc;
3579}
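/*
 * Writer side of the drain described above; drain_ioctls(), implemented
 * earlier in this file, works along these lines (a sketch):
 *
 *	down_write(&cfg->ioctl_rwsem);	// waits for in-flight ioctls
 *	up_write(&cfg->ioctl_rwsem);	// new ioctls may proceed
 */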
3580
3581/*
3582 * Character device file operations
3583 */
3584static const struct file_operations cxlflash_chr_fops = {
3585        .owner          = THIS_MODULE,
3586        .open           = cxlflash_chr_open,
3587        .unlocked_ioctl = cxlflash_chr_ioctl,
3588        .compat_ioctl   = cxlflash_chr_ioctl,
3589};
3590
3591/**
3592 * init_chrdev() - initialize the character device for the host
3593 * @cfg:        Internal structure associated with the host.
3594 *
3595 * Return: 0 on success, -errno on failure
3596 */
3597static int init_chrdev(struct cxlflash_cfg *cfg)
3598{
3599        struct device *dev = &cfg->dev->dev;
3600        struct device *char_dev;
3601        dev_t devno;
3602        int minor;
3603        int rc = 0;
3604
3605        minor = cxlflash_get_minor();
3606        if (unlikely(minor < 0)) {
3607                dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3608                rc = -ENOSPC;
3609                goto out;
3610        }
3611
3612        devno = MKDEV(cxlflash_major, minor);
3613        cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3614
3615        rc = cdev_add(&cfg->cdev, devno, 1);
3616        if (rc) {
3617                dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3618                goto err1;
3619        }
3620
3621        char_dev = device_create(cxlflash_class, NULL, devno,
3622                                 NULL, "cxlflash%d", minor);
3623        if (IS_ERR(char_dev)) {
3624                rc = PTR_ERR(char_dev);
3625                dev_err(dev, "%s: device_create failed rc=%d\n",
3626                        __func__, rc);
3627                goto err2;
3628        }
3629
3630        cfg->chardev = char_dev;
3631out:
3632        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3633        return rc;
3634err2:
3635        cdev_del(&cfg->cdev);
3636err1:
3637        cxlflash_put_minor(minor);
3638        goto out;
3639}
3640
3641/**
3642 * cxlflash_probe() - PCI entry point to add host
3643 * @pdev:       PCI device associated with the host.
3644 * @dev_id:     PCI device id associated with device.
3645 *
3646 * The device will initially start out in a 'probing' state and
3647 * transition to the 'normal' state at the end of a successful
3648 * probe. Should an EEH event occur during probe, the notification
3649 * thread (error_detected()) will wait until the probe handler
3650 * is nearly complete. At that time, the device will be moved to
3651 * a 'probed' state and the EEH thread woken up to drive the slot
3652 * reset and recovery (device moves to 'normal' state). Meanwhile,
3653 * the probe will be allowed to exit successfully.
3654 *
3655 * Return: 0 on success, -errno on failure
3656 */
3657static int cxlflash_probe(struct pci_dev *pdev,
3658                          const struct pci_device_id *dev_id)
3659{
3660        struct Scsi_Host *host;
3661        struct cxlflash_cfg *cfg = NULL;
3662        struct device *dev = &pdev->dev;
3663        struct dev_dependent_vals *ddv;
3664        int rc = 0;
3665        int k;
3666
3667        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3668                __func__, pdev->irq);
3669
3670        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3671        driver_template.max_sectors = ddv->max_sectors;
3672
3673        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3674        if (!host) {
3675                dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3676                rc = -ENOMEM;
3677                goto out;
3678        }
3679
3680        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3681        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3682        host->unique_id = host->host_no;
3683        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3684
3685        cfg = shost_priv(host);
3686        cfg->state = STATE_PROBING;
3687        cfg->host = host;
3688        rc = alloc_mem(cfg);
3689        if (rc) {
3690                dev_err(dev, "%s: alloc_mem failed\n", __func__);
3691                rc = -ENOMEM;
3692                scsi_host_put(cfg->host);
3693                goto out;
3694        }
3695
3696        cfg->init_state = INIT_STATE_NONE;
3697        cfg->dev = pdev;
3698        cfg->cxl_fops = cxlflash_cxl_fops;
3699        cfg->ops = cxlflash_assign_ops(ddv);
3700        WARN_ON_ONCE(!cfg->ops);
3701
3702        /*
3703         * Promoted LUNs move to the top of the LUN table. The rest stay on
3704         * the bottom half. The bottom half grows from the end (index = 255),
3705         * whereas the top half grows from the beginning (index = 0).
3706         *
3707         * Initialize the last LUN index for all possible ports.
3708         */
3709        cfg->promote_lun_index = 0;
3710
3711        for (k = 0; k < MAX_FC_PORTS; k++)
3712                cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3713
3714        cfg->dev_id = (struct pci_device_id *)dev_id;
3715
3716        init_waitqueue_head(&cfg->tmf_waitq);
3717        init_waitqueue_head(&cfg->reset_waitq);
3718
3719        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3720        cfg->lr_state = LINK_RESET_INVALID;
3721        cfg->lr_port = -1;
3722        spin_lock_init(&cfg->tmf_slock);
3723        mutex_init(&cfg->ctx_tbl_list_mutex);
3724        mutex_init(&cfg->ctx_recovery_mutex);
3725        init_rwsem(&cfg->ioctl_rwsem);
3726        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3727        INIT_LIST_HEAD(&cfg->lluns);
3728
3729        pci_set_drvdata(pdev, cfg);
3730
3731        rc = init_pci(cfg);
3732        if (rc) {
3733                dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3734                goto out_remove;
3735        }
3736        cfg->init_state = INIT_STATE_PCI;
3737
3738        cfg->afu_cookie = cfg->ops->create_afu(pdev);
3739        if (unlikely(!cfg->afu_cookie)) {
3740                dev_err(dev, "%s: create_afu failed\n", __func__);
                    rc = -ENOMEM;
3741                goto out_remove;
3742        }
3743
3744        rc = init_afu(cfg);
3745        if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3746                dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3747                goto out_remove;
3748        }
3749        cfg->init_state = INIT_STATE_AFU;
3750
3751        rc = init_scsi(cfg);
3752        if (rc) {
3753                dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3754                goto out_remove;
3755        }
3756        cfg->init_state = INIT_STATE_SCSI;
3757
3758        rc = init_chrdev(cfg);
3759        if (rc) {
3760                dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3761                goto out_remove;
3762        }
3763        cfg->init_state = INIT_STATE_CDEV;
3764
3765        if (wq_has_sleeper(&cfg->reset_waitq)) {
3766                cfg->state = STATE_PROBED;
3767                wake_up_all(&cfg->reset_waitq);
3768        } else
3769                cfg->state = STATE_NORMAL;
3770out:
3771        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3772        return rc;
3773
3774out_remove:
3775        cfg->state = STATE_PROBED;
3776        cxlflash_remove(pdev);
3777        goto out;
3778}
3779
3780/**
3781 * cxlflash_pci_error_detected() - called when a PCI error is detected
3782 * @pdev:       PCI device struct.
3783 * @state:      PCI channel state.
3784 *
3785 * When an EEH occurs during an active reset, wait until the reset is
3786 * complete and then take action based upon the device state.
3787 *
3788 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3789 */
3790static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3791                                                    pci_channel_state_t state)
3792{
3793        int rc = 0;
3794        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3795        struct device *dev = &cfg->dev->dev;
3796
3797        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3798
3799        switch (state) {
3800        case pci_channel_io_frozen:
3801                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3802                                             cfg->state != STATE_PROBING);
3803                if (cfg->state == STATE_FAILTERM)
3804                        return PCI_ERS_RESULT_DISCONNECT;
3805
3806                cfg->state = STATE_RESET;
3807                scsi_block_requests(cfg->host);
3808                drain_ioctls(cfg);
3809                rc = cxlflash_mark_contexts_error(cfg);
3810                if (unlikely(rc))
3811                        dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3812                                __func__, rc);
3813                term_afu(cfg);
3814                return PCI_ERS_RESULT_NEED_RESET;
3815        case pci_channel_io_perm_failure:
3816                cfg->state = STATE_FAILTERM;
3817                wake_up_all(&cfg->reset_waitq);
3818                scsi_unblock_requests(cfg->host);
3819                return PCI_ERS_RESULT_DISCONNECT;
3820        default:
3821                break;
3822        }
3823        return PCI_ERS_RESULT_NEED_RESET;
3824}
3825
3826/**
3827 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3828 * @pdev:       PCI device struct.
3829 *
3830 * This routine is called by the pci error recovery code after the PCI
3831 * slot has been reset, just before we should resume normal operations.
3832 *
3833 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3834 */
3835static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3836{
3837        int rc = 0;
3838        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3839        struct device *dev = &cfg->dev->dev;
3840
3841        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3842
3843        rc = init_afu(cfg);
3844        if (unlikely(rc)) {
3845                dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3846                return PCI_ERS_RESULT_DISCONNECT;
3847        }
3848
3849        return PCI_ERS_RESULT_RECOVERED;
3850}
3851
3852/**
3853 * cxlflash_pci_resume() - called when normal operation can resume
3854 * @pdev:       PCI device struct
3855 */
3856static void cxlflash_pci_resume(struct pci_dev *pdev)
3857{
3858        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3859        struct device *dev = &cfg->dev->dev;
3860
3861        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3862
3863        cfg->state = STATE_NORMAL;
3864        wake_up_all(&cfg->reset_waitq);
3865        scsi_unblock_requests(cfg->host);
3866}
3867
3868/**
3869 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3870 * @dev:        Character device.
3871 * @mode:       Mode that can be used to verify access.
3872 *
3873 * Return: Allocated string describing the devtmpfs structure.
3874 */
3875static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3876{
3877        return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3878}
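/*
 * With this devnode hook, the character device created by init_chrdev()
 * appears under a class subdirectory, e.g. (the minor number varies):
 *
 *	/dev/cxlflash/cxlflash0
 */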
3879
3880/**
3881 * cxlflash_class_init() - create character device class
3882 *
3883 * Return: 0 on success, -errno on failure
3884 */
3885static int cxlflash_class_init(void)
3886{
3887        dev_t devno;
3888        int rc = 0;
3889
3890        rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3891        if (unlikely(rc)) {
3892                pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3893                goto out;
3894        }
3895
3896        cxlflash_major = MAJOR(devno);
3897
3898        cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3899        if (IS_ERR(cxlflash_class)) {
3900                rc = PTR_ERR(cxlflash_class);
3901                pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3902                goto err;
3903        }
3904
3905        cxlflash_class->devnode = cxlflash_devnode;
3906out:
3907        pr_debug("%s: returning rc=%d\n", __func__, rc);
3908        return rc;
3909err:
3910        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3911        goto out;
3912}
3913
3914/**
3915 * cxlflash_class_exit() - destroy character device class
3916 */
3917static void cxlflash_class_exit(void)
3918{
3919        dev_t devno = MKDEV(cxlflash_major, 0);
3920
3921        class_destroy(cxlflash_class);
3922        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3923}
3924
3925static const struct pci_error_handlers cxlflash_err_handler = {
3926        .error_detected = cxlflash_pci_error_detected,
3927        .slot_reset = cxlflash_pci_slot_reset,
3928        .resume = cxlflash_pci_resume,
3929};
3930
3931/*
3932 * PCI device structure
3933 */
3934static struct pci_driver cxlflash_driver = {
3935        .name = CXLFLASH_NAME,
3936        .id_table = cxlflash_pci_table,
3937        .probe = cxlflash_probe,
3938        .remove = cxlflash_remove,
3939        .shutdown = cxlflash_remove,
3940        .err_handler = &cxlflash_err_handler,
3941};
3942
3943/**
3944 * init_cxlflash() - module entry point
3945 *
3946 * Return: 0 on success, -errno on failure
3947 */
3948static int __init init_cxlflash(void)
3949{
3950        int rc;
3951
3952        check_sizes();
3953        cxlflash_list_init();
3954        rc = cxlflash_class_init();
3955        if (unlikely(rc))
3956                goto out;
3957
3958        rc = pci_register_driver(&cxlflash_driver);
3959        if (unlikely(rc))
3960                goto err;
3961out:
3962        pr_debug("%s: returning rc=%d\n", __func__, rc);
3963        return rc;
3964err:
3965        cxlflash_class_exit();
3966        goto out;
3967}
3968
3969/**
3970 * exit_cxlflash() - module exit point
3971 */
3972static void __exit exit_cxlflash(void)
3973{
3974        cxlflash_term_global_luns();
3975        cxlflash_free_errpage();
3976
3977        pci_unregister_driver(&cxlflash_driver);
3978        cxlflash_class_exit();
3979}
3980
3981module_init(init_cxlflash);
3982module_exit(exit_cxlflash);
3983