linux/drivers/scsi/cxlflash/main.c
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:        AFU command that experienced the error.
 * @scp:        SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
        struct afu *afu;
        struct cxlflash_cfg *cfg;
        struct device *dev;
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;
        u32 resid;

        if (unlikely(!cmd))
                return;

        afu = cmd->parent;
        cfg = afu->parent;
        dev = &cfg->dev->dev;
        ioarcb = &(cmd->rcb);
        ioasa = &(cmd->sa);

        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
                dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
                        __func__, cmd, scp, resid);
        }

        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
                dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
                        __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }

        dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
                "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
                ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
                ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
                if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
                        memcpy(scp->sense_buffer, ioasa->sense_data,
                               SISL_SENSE_DATA_LEN);
                        scp->result = ioasa->rc.scsi_rc;
                } else {
                        scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
                }
        }

        /*
         * We encountered an error. Set scp->result based on nature
         * of error.
         */
        if (ioasa->rc.fc_rc) {
                /* We have an FC status */
                switch (ioasa->rc.fc_rc) {
                case SISL_FC_RC_LINKDOWN:
                        scp->result = (DID_REQUEUE << 16);
                        break;
                case SISL_FC_RC_RESID:
                        /* This indicates an FCP resid underrun */
                        if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
                                /*
                                 * If the SISL_RC_FLAGS_OVERRUN flag was set,
                                 * this error is handled elsewhere. If not,
                                 * it must be handled here. This is probably
                                 * an AFU bug.
                                 */
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_FC_RC_RESIDERR:
                        /* Resid mismatch between adapter and device */
                case SISL_FC_RC_TGTABORT:
                case SISL_FC_RC_ABORTOK:
                case SISL_FC_RC_ABORTFAIL:
                case SISL_FC_RC_NOLOGI:
                case SISL_FC_RC_ABORTPEND:
                case SISL_FC_RC_WRABORTPEND:
                case SISL_FC_RC_NOEXP:
                case SISL_FC_RC_INUSE:
                        scp->result = (DID_ERROR << 16);
                        break;
                }
        }

        if (ioasa->rc.afu_rc) {
                /* We have an AFU error */
                switch (ioasa->rc.afu_rc) {
                case SISL_AFU_RC_NO_CHANNELS:
                        scp->result = (DID_NO_CONNECT << 16);
                        break;
                case SISL_AFU_RC_DATA_DMA_ERR:
                        switch (ioasa->afu_extra) {
                        case SISL_AFU_DMA_ERR_PAGE_IN:
                                /* Retry */
                                scp->result = (DID_IMM_RETRY << 16);
                                break;
                        case SISL_AFU_DMA_ERR_INVALID_EA:
                        default:
                                scp->result = (DID_ERROR << 16);
                        }
                        break;
                case SISL_AFU_RC_OUT_OF_DATA_BUFS:
                        /* Retry */
                        scp->result = (DID_ALLOC_FAILURE << 16);
                        break;
                default:
                        scp->result = (DID_ERROR << 16);
                }
        }
}

/**
 * cmd_complete() - command completion handler
 * @cmd:        AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
        struct scsi_cmnd *scp;
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        list_del(&cmd->list);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        if (cmd->scp) {
                scp = cmd->scp;
                if (unlikely(cmd->sa.ioasc))
                        process_cmd_err(cmd, scp);
                else
                        scp->result = (DID_OK << 16);

                dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
                                    __func__, scp, scp->result, cmd->sa.ioasc);
                scp->scsi_done(scp);
        } else if (cmd->cmd_tmf) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                wake_up_all_locked(&cfg->tmf_waitq);
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
        } else {
                complete(&cmd->cevent);
        }
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:        Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct afu_cmd *cmd, *tmp;
        struct scsi_cmnd *scp;
        ulong lock_flags;

        list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
                /* Bypass command when on a doneq, cmd_complete() will handle */
                if (!list_empty(&cmd->queue))
                        continue;

                list_del(&cmd->list);

                if (cmd->scp) {
                        scp = cmd->scp;
                        scp->result = (DID_IMM_RETRY << 16);
                        scp->scsi_done(scp);
                } else {
                        cmd->cmd_aborted = true;

                        if (cmd->cmd_tmf) {
                                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                                cfg->tmf_active = false;
                                wake_up_all_locked(&cfg->tmf_waitq);
                                spin_unlock_irqrestore(&cfg->tmf_slock,
                                                       lock_flags);
                        } else {
                                complete(&cmd->cevent);
                        }
                }
        }
}

/**
 * context_reset() - reset context via specified register
 * @hwq:        Hardware queue owning the context to be reset.
 * @reset_reg:  MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = -ETIMEDOUT;
        int nretry = 0;
        u64 val = 0x1;
        ulong lock_flags;

        dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        writeq_be(val, reset_reg);
        do {
                val = readq_be(reset_reg);
                if ((val & 0x1) == 0x0) {
                        rc = 0;
                        break;
                }

                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);

        if (!rc)
                flush_pending_cmds(hwq);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

        dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
                __func__, rc, val, nretry);
        return rc;
}
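
/*
 * Editorial note (illustrative sketch, not part of the driver): the reset
 * poll above doubles its delay on every iteration, so the worst-case busy
 * wait is bounded by a geometric series. With delays of 2^0 .. 2^N
 * microseconds for N = MC_ROOM_RETRY_CNT, the total spin is:
 *
 *      sum(k = 0 .. N) 2^k microseconds = (2^(N + 1) - 1) microseconds
 *
 * For example, a hypothetical retry count of 10 would bound the spin at
 * roughly 2 ms. Since the loop runs with the send queue lock held and
 * interrupts disabled, the retry count must stay small.
 */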

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:        Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
        return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        s64 room;
        ulong lock_flags;

        /*
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
                        dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
                                            "0x%02X, room=0x%016llX\n",
                                            __func__, cmd->rcb.cdb[0], room);
                        hwq->room = 0;
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
                hwq->room = room - 1;
        }

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
                __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
}
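
/*
 * Editorial note (illustrative sketch, not part of the driver): the 'room'
 * cache above amortizes the expensive cmd_room MMIO read. The hardware
 * register is only consulted when the cached credit count underflows, so N
 * commands cost roughly one MMIO read instead of N. The pattern, shown in
 * isolation with hypothetical names:
 *
 *      if (--cached_room < 0) {                // cache exhausted
 *              cached_room = read_hw_room();   // one MMIO refresh
 *              if (cached_room <= 0)
 *                      return -EBUSY;          // truly no room, back off
 *              cached_room--;                  // consume one credit
 *      }
 *      // ... safe to ring the doorbell for exactly one command ...
 *
 * The trade-off is that the cache can be optimistic between refreshes; the
 * AFU's advertised room is trusted to absorb the resulting burstiness.
 */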

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
        int rc = 0;
        int newval;
        ulong lock_flags;

        newval = atomic_dec_if_positive(&hwq->hsq_credits);
        if (newval <= 0) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }

        cmd->rcb.ioasa = &cmd->sa;

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

        *hwq->hsq_curr = cmd->rcb;
        if (hwq->hsq_curr < hwq->hsq_end)
                hwq->hsq_curr++;
        else
                hwq->hsq_curr = hwq->hsq_start;

        list_add(&cmd->list, &hwq->pending_cmds);
        writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
               "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
               cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
               readq_be(&hwq->host_map->sq_head),
               readq_be(&hwq->host_map->sq_tail));
        return rc;
}
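
/*
 * Editorial note (illustrative sketch, not part of the driver): the submit
 * path above treats the SQ as a circular ring of IOARCBs. The producer
 * copies the request into the current slot, advances the cursor (wrapping
 * from end back to start), and publishes the new tail to hardware; credits
 * gate the producer so the tail never overruns the head. The wrap logic
 * reduces to:
 *
 *      *curr = new_entry;                      // fill current slot
 *      curr = (curr < end) ? curr + 1 : start; // advance with wraparound
 *      publish_tail(curr);                     // hardware consumes to tail
 *
 * where start/end/curr are stand-in names for the ring bounds and cursor
 * (hsq_start, hsq_end, hsq_curr in the code) and publish_tail() stands in
 * for the sq_tail MMIO write.
 */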

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:        AFU associated with the host.
 * @cmd:        AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

        timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
        if (!timeout)
                rc = -ETIMEDOUT;

        if (cmd->cmd_aborted)
                rc = -EAGAIN;

        if (unlikely(cmd->sa.ioasc != 0)) {
                dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
                        __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -EIO;
        }

        return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 * @afu:        AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
                             struct afu *afu)
{
        u32 tag;
        u32 hwq = 0;

        if (afu->num_hwqs == 1)
                return 0;

        switch (afu->hwq_mode) {
        case HWQ_MODE_RR:
                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
                break;
        case HWQ_MODE_TAG:
                tag = blk_mq_unique_tag(scp->request);
                hwq = blk_mq_unique_tag_to_hwq(tag);
                break;
        case HWQ_MODE_CPU:
                hwq = smp_processor_id() % afu->num_hwqs;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        return hwq;
}
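
/*
 * Editorial note (illustrative sketch, not part of the driver): the three
 * steering modes above trade fairness against locality. Round-robin
 * (HWQ_MODE_RR) spreads load evenly, tag mode (HWQ_MODE_TAG) derives the
 * queue from the block layer's unique tag, and CPU mode (HWQ_MODE_CPU)
 * keeps submissions CPU-local. A caller only ever consumes the index:
 *
 *      u32 index = cmd_to_target_hwq(host, scp, afu);
 *      struct hwq *hwq = get_hwq(afu, index);  // in-bounds by construction
 *
 * The "Trusted" in the kernel-doc means exactly this: every mode reduces
 * its result modulo num_hwqs (or derives it from a valid tag), so callers
 * do not repeat the bounds check.
 */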

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:        Internal structure associated with the host.
 * @sdev:       SCSI device destined for TMF.
 * @tmfcmd:     TMF command to send.
 *
 * Return:
 *      0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
                    u64 tmfcmd)
{
        struct afu *afu = cfg->afu;
        struct afu_cmd *cmd = NULL;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        bool needs_deletion = false;
        char *buf = NULL;
        ulong lock_flags;
        int rc = 0;
        ulong to;

        buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
        if (unlikely(!buf)) {
                dev_err(dev, "%s: no memory for command\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
        INIT_LIST_HEAD(&cmd->queue);

        /* When Task Management Function is active do not send another */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        cfg->tmf_active = true;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        cmd->parent = afu;
        cmd->cmd_tmf = true;
        cmd->hwq_index = hwq->index;

        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
        cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
        cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
                              SISL_REQ_FLAGS_SUP_UNDERRUN |
                              SISL_REQ_FLAGS_TMF_CMD);
        memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

        rc = afu->send_cmd(afu, cmd);
        if (unlikely(rc)) {
                spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
                cfg->tmf_active = false;
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                goto out;
        }

        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        to = msecs_to_jiffies(5000);
        to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
                                                       !cfg->tmf_active,
                                                       cfg->tmf_slock,
                                                       to);
        if (!to) {
                dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -ETIMEDOUT;
                needs_deletion = true;
        } else if (cmd->cmd_aborted) {
                dev_err(dev, "%s: TMF aborted\n", __func__);
                rc = -EAGAIN;
        } else if (cmd->sa.ioasc) {
                dev_err(dev, "%s: TMF failed ioasc=%08x\n",
                        __func__, cmd->sa.ioasc);
                rc = -EIO;
        }
        cfg->tmf_active = false;
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        if (needs_deletion) {
                spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
                list_del(&cmd->list);
                spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        }
out:
        kfree(buf);
        return rc;
}
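
/*
 * Editorial note (illustrative sketch, not part of the driver): send_tmf()
 * is the building block for SCSI error-handler callbacks. A device-reset
 * handler would look roughly like the following, where TMF_LUN_RESET
 * stands in for whichever TMF opcode the SISLite spec defines (the exact
 * constant is an assumption here):
 *
 *      static int example_eh_device_reset(struct scsi_cmnd *scp)
 *      {
 *              struct scsi_device *sdev = scp->device;
 *              struct cxlflash_cfg *cfg = shost_priv(sdev->host);
 *
 *              return send_tmf(cfg, sdev, TMF_LUN_RESET) ? FAILED : SUCCESS;
 *      }
 *
 * The tmf_active/tmf_waitq pair serializes TMFs host-wide: only one may be
 * outstanding, and both the issuer and cmd_complete() signal the waitqueue
 * under tmf_slock.
 */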

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:       SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
        return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:       SCSI host associated with device.
 * @scp:        SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afuci(scp);
        struct scatterlist *sg = scsi_sglist(scp);
        int hwq_index = cmd_to_target_hwq(host, scp, afu);
        struct hwq *hwq = get_hwq(afu, hwq_index);
        u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
        ulong lock_flags;
        int rc = 0;

        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
                            "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                            get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        /*
         * If a Task Management Function is active, do not send a new
         * command; return busy so the mid-layer retries once the TMF
         * completes.
         */
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active) {
                spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        switch (cfg->state) {
        case STATE_PROBING:
        case STATE_PROBED:
        case STATE_RESET:
                dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
                dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
                goto out;
        default:
                atomic_inc(&afu->cmds_active);
                break;
        }

        if (likely(sg)) {
                cmd->rcb.data_len = sg->length;
                cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
        }

        cmd->scp = scp;
        cmd->parent = afu;
        cmd->hwq_index = hwq_index;

        cmd->sa.ioasc = 0;
        cmd->rcb.ctx_id = hwq->ctx_hndl;
        cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
        cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
        cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

        if (scp->sc_data_direction == DMA_TO_DEVICE)
                req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

        cmd->rcb.req_flags = req_flags;
        memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

        rc = afu->send_cmd(afu, cmd);
        atomic_dec(&afu->cmds_active);
out:
        return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;

        if (pci_channel_offline(pdev))
                wait_event_timeout(cfg->reset_waitq,
                                   !pci_channel_offline(pdev),
                                   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:        Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;

        if (cfg->afu) {
                free_pages((ulong)afu, get_order(sizeof(struct afu)));
                cfg->afu = NULL;
        }
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
        if (cfg->async_reset_cookie == 0)
                return;

        /* Wait until all async calls prior to this cookie have completed */
        async_synchronize_cookie(cfg->async_reset_cookie + 1);
        cfg->async_reset_cookie = 0;
}
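
/*
 * Editorial note (illustrative sketch, not part of the driver): the kernel's
 * async framework hands back a monotonically increasing cookie from
 * async_schedule(), and async_synchronize_cookie(c) waits for all work
 * scheduled *before* cookie c. Hence the "+ 1" above: to wait for the reset
 * scheduled with a given cookie, you synchronize on the next one.
 *
 *      cookie = async_schedule(reset_fn, cfg); // reset runs asynchronously
 *      ...
 *      async_synchronize_cookie(cookie + 1);   // now reset_fn has finished
 *
 * reset_fn is a stand-in name here for whatever async reset handler is
 * scheduled elsewhere in the driver.
 */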

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
        struct afu *afu = cfg->afu;
        struct hwq *hwq;
        int i;

        cancel_work_sync(&cfg->work_q);
        if (!current_is_async())
                cxlflash_reset_sync(cfg);

        if (likely(afu)) {
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);

                if (afu_is_irqpoll_enabled(afu)) {
                        for (i = 0; i < afu->num_hwqs; i++) {
                                hwq = get_hwq(afu, i);

                                irq_poll_disable(&hwq->irqpoll);
                        }
                }

                if (likely(afu->afu_map)) {
                        cfg->ops->psa_unmap(afu->afu_map);
                        afu->afu_map = NULL;
                }
        }
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:        Internal structure associated with the host.
 * @level:      Depth of allocation, where to begin waterfall tear down.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
                      u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        switch (level) {
        case UNMAP_THREE:
                /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
                if (index == PRIMARY_HWQ)
                        cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
                /* fall through */
        case UNMAP_TWO:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
                /* fall through */
        case UNMAP_ONE:
                cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
                /* fall through */
        case FREE_IRQ:
                cfg->ops->free_afu_irqs(hwq->ctx_cookie);
                /* fall through */
        case UNDO_NOOP:
                /* No action required */
                break;
        }
}

/**
 * term_mc() - terminates the master context
 * @cfg:        Internal structure associated with the host.
 * @index:      Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct hwq *hwq;
        ulong lock_flags;

        if (!afu) {
                dev_err(dev, "%s: returning with NULL afu\n", __func__);
                return;
        }

        hwq = get_hwq(afu, index);

        if (!hwq->ctx_cookie) {
                dev_err(dev, "%s: returning with NULL MC\n", __func__);
                return;
        }

        WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
        if (index != PRIMARY_HWQ)
                WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
        hwq->ctx_cookie = NULL;

        spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
        hwq->hrrq_online = false;
        spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        flush_pending_cmds(hwq);
        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:        Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int k;

        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
         * area is unmapped.
         *
         * 1) Disable all AFU interrupts for each master
         * 2) Unmap the problem state area
         * 3) Stop each master context
         */
        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_intr(cfg, UNMAP_THREE, k);

        stop_afu(cfg);

        for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
                term_mc(cfg, k);

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:        Internal structure associated with the host.
 * @wait:       Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and the device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct dev_dependent_vals *ddv;
        __be64 __iomem *fc_port_regs;
        u64 reg, status;
        int i, retry_cnt = 0;

        ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
        if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
                return;

        if (!afu || !afu->afu_map) {
                dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }

        /* Notify AFU */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
                reg |= SISL_FC_SHUTDOWN_NORMAL;
                writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
        }

        if (!wait)
                return;

        /* Wait up to 1.5 seconds for shutdown processing to complete */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);
                retry_cnt = 0;

                while (true) {
                        status = readq_be(&fc_port_regs[FC_STATUS / 8]);
                        if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
                                break;
                        if (++retry_cnt >= MC_RETRY_CNT) {
                                dev_dbg(dev, "%s: port %d shutdown processing "
                                        "not yet completed\n", __func__, i);
                                break;
                        }
                        msleep(100 * retry_cnt);
                }
        }
}
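
/*
 * Editorial note (illustrative sketch, not part of the driver): the wait
 * loop above sleeps 100 ms, then 200 ms, then 300 ms, and so on (a linear
 * backoff), giving a worst-case wait per port of
 *
 *      100 * (1 + 2 + ... + (MC_RETRY_CNT - 1)) ms
 *      = 100 * MC_RETRY_CNT * (MC_RETRY_CNT - 1) / 2 ms
 *
 * The "up to 1.5 seconds" in the comment therefore implies a small retry
 * count; the exact value of MC_RETRY_CNT lives in a driver header and is
 * not shown in this file.
 */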

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
        int minor;
        long bit;

        bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
        if (bit >= CXLFLASH_MAX_ADAPTERS)
                return -1;

        minor = bit & MINORMASK;
        set_bit(minor, cxlflash_minor);
        return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:      Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
        clear_bit(minor, cxlflash_minor);
}
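
/*
 * Editorial note (illustrative sketch, not part of the driver): minor
 * numbers are managed as a simple bitmap allocator, and get/put bracket the
 * lifetime of each adapter's character device. A create path would pair
 * them roughly like this (example_create_chrdev is a hypothetical name; the
 * driver's real chardev setup lives elsewhere in this file):
 *
 *      static int example_create_chrdev(struct cxlflash_cfg *cfg)
 *      {
 *              int minor = cxlflash_get_minor();
 *
 *              if (minor < 0)          // bitmap exhausted
 *                      return -ENOSPC;
 *              // ... cdev_init()/cdev_add()/device_create() with
 *              // MKDEV(cxlflash_major, minor), calling
 *              // cxlflash_put_minor(minor) on any failure ...
 *              return 0;
 *      }
 */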

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:        Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
        device_unregister(cfg->chardev);
        cfg->chardev = NULL;
        cdev_del(&cfg->cdev);
        cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:       PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
        ulong lock_flags;

        if (!pci_is_enabled(pdev)) {
                dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }

        /* Yield to running recovery threads before continuing with remove */
        wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
                                     cfg->state != STATE_PROBING);
        spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
        if (cfg->tmf_active)
                wait_event_interruptible_lock_irq(cfg->tmf_waitq,
                                                  !cfg->tmf_active,
                                                  cfg->tmf_slock);
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

        /* Notify AFU and wait for shutdown processing to complete */
        notify_shutdown(cfg, true);

        cfg->state = STATE_FAILTERM;
        cxlflash_stop_term_user_contexts(cfg);

        switch (cfg->init_state) {
        case INIT_STATE_CDEV:
                cxlflash_release_chrdev(cfg);
                /* fall through */
        case INIT_STATE_SCSI:
                cxlflash_term_local_luns(cfg);
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
                term_afu(cfg);
                /* fall through */
        case INIT_STATE_PCI:
                cfg->ops->destroy_afu(cfg->afu_cookie);
                pci_disable_device(pdev);
                /* fall through */
        case INIT_STATE_NONE:
                free_mem(cfg);
                scsi_host_put(cfg->host);
                break;
        }

        dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:        Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *      0 on success
 *      -ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
        int rc = 0;
        struct device *dev = &cfg->dev->dev;

        /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
                dev_err(dev, "%s: cannot get %d free pages\n",
                        __func__, get_order(sizeof(struct afu)));
                rc = -ENOMEM;
                goto out;
        }
        cfg->afu->parent = cfg;
        cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
        cfg->afu->afu_map = NULL;
out:
        return rc;
}
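
/*
 * Editorial note (illustrative sketch, not part of the driver): get_order()
 * returns the smallest n such that 2^n pages cover the requested size, so
 * the allocation above rounds up to a power-of-two run of pages. Assuming
 * a ~28 KiB struct afu:
 *
 *      4 KiB pages:  get_order(28K) = 3  -> 8 pages = 32 KiB allocated
 *      64 KiB pages: get_order(28K) = 0  -> 1 page  = 64 KiB allocated
 *
 * which is what the "one 64k page or up to seven 4k pages" comment alludes
 * to: seven 4k pages describe the size, while the allocator still hands
 * back eight.
 */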

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = pci_enable_device(pdev);
        if (rc || pci_channel_offline(pdev)) {
                if (pci_channel_offline(pdev)) {
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        rc = pci_enable_device(pdev);
                }

                if (rc) {
                        dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:        Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
        struct pci_dev *pdev = cfg->dev;
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
                dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }

        scsi_scan_host(cfg->host);

out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);   /* set ON_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
        u64 cmdcfg;

        cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
        cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);  /* clear ON_LINE */
        cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);  /* set OFF_LINE */
        writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will time out
 * when the cable is not plugged in.
 *
 * Return:
 *      TRUE (1) when the specified port is online
 *      FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @delay_us:   Number of microseconds to delay between reading port status.
 * @nretry:     Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *      TRUE (1) when the specified port is offline
 *      FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
        u64 status;

        WARN_ON(delay_us < 1000);

        do {
                msleep(delay_us / 1000);
                status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
                if (status == U64_MAX)
                        nretry /= 2;
        } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
                 nretry--);

        return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 * @wwpn:       The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
                         u64 wwpn)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);
        }

        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
                dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);
        }
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:        AFU associated with the host that owns the specified FC port.
 * @port:       Port number being configured.
 * @fc_regs:    Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port(s) exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 port_sel;

        /* First switch the AFU to the other links, if any */
        port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
        port_sel &= ~(1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go offline timed out\n",
                        __func__, port);

        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
                dev_err(dev, "%s: wait on port %d to go online timed out\n",
                        __func__, port);

        /* Switch back to include this port */
        port_sel |= (1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

        dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:        AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
        struct cxlflash_cfg *cfg = afu->parent;
        __be64 __iomem *fc_port_regs;
        int i;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 reg;

        /*
         * Global async interrupts: the AFU clears afu_ctrl on context exit
         * if async interrupts were sent to that context. This prevents the
         * AFU from sending further async interrupts when there is nobody
         * to receive them.
         */

        /* mask all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
        /* set LISN# to send and point to primary master context */
        reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

        if (afu->internal_lun)
                reg |= 1;       /* Bit 63 indicates local lun */
        writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
        /* clear all */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
        /* unmask bits that are of interest */
        /* note: afu can send an interrupt after this step */
        writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
        /* clear again in case a bit came on after previous clear but before */
        /* unmask */
        writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

        /* Clear/Set internal lun bits */
        fc_port_regs = get_fc_port_regs(cfg, 0);
        reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
        reg &= SISL_FC_INTERNAL_MASK;
        if (afu->internal_lun)
                reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
        writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

        /* now clear FC errors */
        for (i = 0; i < cfg->num_fc_ports; i++) {
                fc_port_regs = get_fc_port_regs(cfg, i);

                writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
                writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
        }

        /*
         * Sync interrupts for master's IOARRIN write. Note that unlike
         * asyncs, there can be no pending sync interrupts at this time
         * (this is a fresh context and master has not written IOARRIN
         * yet), so there is nothing to clear.
         */

        /* set LISN#, it is always sent to the context that wrote IOARRIN */
        for (i = 0; i < afu->num_hwqs; i++) {
                hwq = get_hwq(afu, i);

                reg = readq_be(&hwq->host_map->ctx_ctrl);
                WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
                reg |= SISL_MSI_SYNC_ERROR;
                writeq_be(reg, &hwq->host_map->ctx_ctrl);
                writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
        }
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct cxlflash_cfg *cfg = hwq->afu->parent;
        struct device *dev = &cfg->dev->dev;
        u64 reg;
        u64 reg_unmasked;

        reg = readq_be(&hwq->host_map->intr_status);
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

        if (reg_unmasked == 0UL) {
                dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
                        __func__, reg);
                goto cxlflash_sync_err_irq_exit;
        }

        dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
                __func__, reg);

        writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
        return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:        Hardware queue owning the RRQ to process.
 * @doneq:      Queue of commands harvested from the RRQ.
 * @budget:     Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
        struct afu *afu = hwq->afu;
        struct afu_cmd *cmd;
        struct sisl_ioasa *ioasa;
        struct sisl_ioarcb *ioarcb;
        bool toggle = hwq->toggle;
        int num_hrrq = 0;
        u64 entry,
            *hrrq_start = hwq->hrrq_start,
            *hrrq_end = hwq->hrrq_end,
            *hrrq_curr = hwq->hrrq_curr;

        /* Process ready RRQ entries up to the specified budget (if any) */
        while (true) {
                entry = *hrrq_curr;

                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;

                entry &= ~SISL_RESP_HANDLE_T_BIT;

                if (afu_is_sq_cmd_mode(afu)) {
                        ioasa = (struct sisl_ioasa *)entry;
                        cmd = container_of(ioasa, struct afu_cmd, sa);
                } else {
                        ioarcb = (struct sisl_ioarcb *)entry;
                        cmd = container_of(ioarcb, struct afu_cmd, rcb);
                }

                list_add_tail(&cmd->queue, doneq);

                /* Advance to next entry or wrap and flip the toggle bit */
                if (hrrq_curr < hrrq_end) {
                        hrrq_curr++;
                } else {
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }

                atomic_inc(&hwq->hsq_credits);
                num_hrrq++;

                if (budget > 0 && num_hrrq >= budget)
                        break;
        }

        hwq->hrrq_curr = hrrq_curr;
        hwq->toggle = toggle;

        return num_hrrq;
}
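
/*
 * Editorial note (illustrative sketch, not part of the driver): the RRQ is
 * a ring the AFU writes and the host reads, with no head/tail registers to
 * compare. Ownership is conveyed by the toggle (T) bit instead: the AFU
 * writes each entry with the current generation's T value and flips the
 * value it writes each time it wraps; the host mirrors this by flipping
 * its expected value when *it* wraps. Thus "entry's T bit != expected"
 * always means "not yet written this generation":
 *
 *      generation 0: AFU writes entries with one T value, host expects it
 *      generation 1: after a wrap, both sides flip to the other value
 *      ...
 *
 * A stale entry left over from the previous generation never matches, so
 * the ring needs no explicit consumer index in hardware.
 */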

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:      Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
        struct afu_cmd *cmd, *tmp;

        WARN_ON(list_empty(doneq));

        list_for_each_entry_safe(cmd, tmp, doneq, queue)
                cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:    IRQ poll structure associated with queue to poll.
 * @budget:     Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
        struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        num_entries = process_hrrq(hwq, &doneq, budget);
        if (num_entries < budget)
                irq_poll_complete(irqpoll);

        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        process_cmd_doneq(&doneq);
        return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:        Interrupt number.
 * @data:       Private data provided at interrupt registration, the hardware
 *              queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
        struct hwq *hwq = (struct hwq *)data;
        struct afu *afu = hwq->afu;
        unsigned long hrrq_flags;
        LIST_HEAD(doneq);
        int num_entries = 0;

        spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

        /* Silently drop spurious interrupts when queue is not online */
        if (!hwq->hrrq_online) {
                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
                return IRQ_HANDLED;
        }

        if (afu_is_irqpoll_enabled(afu)) {
                irq_poll_sched(&hwq->irqpoll);
                spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
                return IRQ_HANDLED;
        }

        num_entries = process_hrrq(hwq, &doneq, -1);
        spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

        if (num_entries == 0)
                return IRQ_NONE;

        process_cmd_doneq(&doneq);
        return IRQ_HANDLED;
}
1503
1504/*
1505 * Asynchronous interrupt information table
1506 *
1507 * NOTE:
1508 *      - Order matters here as this array is indexed by bit position.
1509 *
1510 *      - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1511 *        as complex and complains due to a lack of parentheses/braces.
1512 */
1513#define ASTATUS_FC(_a, _b, _c, _d)                                       \
1514        { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1515
1516#define BUILD_SISL_ASTATUS_FC_PORT(_a)                                   \
1517        ASTATUS_FC(_a, LINK_UP, "link up", 0),                           \
1518        ASTATUS_FC(_a, LINK_DN, "link down", 0),                         \
1519        ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),            \
1520        ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),            \
1521        ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1522        ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),     \
1523        ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),                \
1524        ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1525
1526static const struct asyc_intr_info ainfo[] = {
1527        BUILD_SISL_ASTATUS_FC_PORT(1),
1528        BUILD_SISL_ASTATUS_FC_PORT(0),
1529        BUILD_SISL_ASTATUS_FC_PORT(3),
1530        BUILD_SISL_ASTATUS_FC_PORT(2)
1531};
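
    /*
     * Illustrative sketch: the table is laid out so that
     * ainfo[n].status == 1ULL << n, letting the handler below decode a
     * status word by walking its set bits:
     *
     *      for_each_set_bit(bit, (ulong *)&status, BITS_PER_LONG)
     *              act_on(&ainfo[bit]);    // act_on() is hypothetical
     *
     * The info->status sanity check in cxlflash_async_err_irq() guards
     * against this table drifting out of sync with the bit definitions.
     */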
1532
1533/**
1534 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1535 * @irq:        Interrupt number.
1536 * @data:       Private data provided at interrupt registration, the hardware queue.
1537 *
1538 * Return: Always returns IRQ_HANDLED.
1539 */
1540static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1541{
1542        struct hwq *hwq = (struct hwq *)data;
1543        struct afu *afu = hwq->afu;
1544        struct cxlflash_cfg *cfg = afu->parent;
1545        struct device *dev = &cfg->dev->dev;
1546        const struct asyc_intr_info *info;
1547        struct sisl_global_map __iomem *global = &afu->afu_map->global;
1548        __be64 __iomem *fc_port_regs;
1549        u64 reg_unmasked;
1550        u64 reg;
1551        u64 bit;
1552        u8 port;
1553
1554        reg = readq_be(&global->regs.aintr_status);
1555        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1556
1557        if (unlikely(reg_unmasked == 0)) {
1558                dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1559                        __func__, reg);
1560                goto out;
1561        }
1562
1563        /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1564        writeq_be(reg_unmasked, &global->regs.aintr_clear);
1565
1566        /* Check each bit that is on */
1567        for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1568                if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1569                        WARN_ON_ONCE(1);
1570                        continue;
1571                }
1572
1573                info = &ainfo[bit];
1574                if (unlikely(info->status != 1ULL << bit)) {
1575                        WARN_ON_ONCE(1);
1576                        continue;
1577                }
1578
1579                port = info->port;
1580                fc_port_regs = get_fc_port_regs(cfg, port);
1581
1582                dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1583                        __func__, port, info->desc,
1584                       readq_be(&fc_port_regs[FC_STATUS / 8]));
1585
1586                /*
1587                 * Do link reset first, some OTHER errors will set FC_ERROR
1588                 * again if cleared before or w/o a reset
1589                 */
1590                if (info->action & LINK_RESET) {
1591                        dev_err(dev, "%s: FC Port %d: resetting link\n",
1592                                __func__, port);
1593                        cfg->lr_state = LINK_RESET_REQUIRED;
1594                        cfg->lr_port = port;
1595                        schedule_work(&cfg->work_q);
1596                }
1597
1598                if (info->action & CLR_FC_ERROR) {
1599                        reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1600
1601                        /*
1602                         * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1603                         * should be the same and tracing one is sufficient.
1604                         */
1605
1606                        dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1607                                __func__, port, reg);
1608
1609                        writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1610                        writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1611                }
1612
1613                if (info->action & SCAN_HOST) {
1614                        atomic_inc(&cfg->scan_host_needed);
1615                        schedule_work(&cfg->work_q);
1616                }
1617        }
1618
1619out:
1620        return IRQ_HANDLED;
1621}
1622
1623/**
1624 * read_vpd() - obtains the WWPNs from VPD
1625 * @cfg:        Internal structure associated with the host.
1626 * @wwpn:       Array of size MAX_FC_PORTS to pass back WWPNs.
1627 *
1628 * Return: 0 on success, -errno on failure
1629 */
1630static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1631{
1632        struct device *dev = &cfg->dev->dev;
1633        struct pci_dev *pdev = cfg->dev;
1634        int rc = 0;
1635        int ro_start, ro_size, i, j, k;
1636        ssize_t vpd_size;
1637        char vpd_data[CXLFLASH_VPD_LEN];
1638        char tmp_buf[WWPN_BUF_LEN] = { 0 };
1639        const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1640                                                cfg->dev_id->driver_data;
1641        const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1642        const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1643
1644        /* Get the VPD data from the device */
1645        vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1646        if (unlikely(vpd_size <= 0)) {
1647                dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1648                        __func__, vpd_size);
1649                rc = -ENODEV;
1650                goto out;
1651        }
1652
1653        /* Get the read only section offset */
1654        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1655                                    PCI_VPD_LRDT_RO_DATA);
1656        if (unlikely(ro_start < 0)) {
1657                dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1658                rc = -ENODEV;
1659                goto out;
1660        }
1661
1662        /* Get the read only section size, cap when extends beyond read VPD */
1663        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1664        j = ro_size;
1665        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1666        if (unlikely((i + j) > vpd_size)) {
1667                dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1668                        __func__, (i + j), vpd_size);
1669                ro_size = vpd_size - i;
1670        }
1671
1672        /*
1673         * Find the offset of the WWPN tag within the read only
1674         * VPD data and validate the found field (partials are
1675         * no good to us). Convert the ASCII data to an integer
1676         * value. Note that we must copy to a temporary buffer
1677         * because the conversion service requires that the ASCII
1678         * string be terminated.
1679         *
1680         * Allow for WWPN not being found for all devices, setting
1681         * the returned WWPN to zero when not found. Notify with a
1682         * log error for cards that should have had WWPN keywords
1683         * in the VPD - cards requiring WWPN will not have their
1684         * ports programmed and operate in an undefined state.
1685         */
1686        for (k = 0; k < cfg->num_fc_ports; k++) {
1687                j = ro_size;
1688                i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1689
1690                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1691                if (i < 0) {
1692                        if (wwpn_vpd_required)
1693                                dev_err(dev, "%s: Port %d WWPN not found\n",
1694                                        __func__, k);
1695                        wwpn[k] = 0ULL;
1696                        continue;
1697                }
1698
1699                j = pci_vpd_info_field_size(&vpd_data[i]);
1700                i += PCI_VPD_INFO_FLD_HDR_SIZE;
1701                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1702                        dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1703                                __func__, k);
1704                        rc = -ENODEV;
1705                        goto out;
1706                }
1707
1708                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1709                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1710                if (unlikely(rc)) {
1711                        dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1712                                __func__, k);
1713                        rc = -ENODEV;
1714                        goto out;
1715                }
1716
1717                dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1718        }
1719
1720out:
1721        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1722        return rc;
1723}
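
    /*
     * Illustrative sketch (not part of the driver): the conversion above
     * hinges on kstrtoul() requiring a NUL-terminated string, hence the
     * copy into tmp_buf, and on WWPN_LEN (16) doubling as the numeric base
     * of the hexadecimal ASCII WWPN. A minimal standalone equivalent for a
     * single field (hypothetical helper name):
     */
    static __maybe_unused int demo_wwpn_to_u64(const char *field, u64 *wwpn)
    {
            char buf[16 + 1] = { 0 };       /* +1 keeps the NUL terminator */

            memcpy(buf, field, 16);
            return kstrtou64(buf, 16, wwpn);        /* base 16, 0 on success */
    }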
1724
1725/**
1726 * init_pcr() - initialize the provisioning and control registers
1727 * @cfg:        Internal structure associated with the host.
1728 *
1729 * Also sets up fast access to the mapped registers and initializes AFU
1730 * command fields that never change.
1731 */
1732static void init_pcr(struct cxlflash_cfg *cfg)
1733{
1734        struct afu *afu = cfg->afu;
1735        struct sisl_ctrl_map __iomem *ctrl_map;
1736        struct hwq *hwq;
1737        void *cookie;
1738        int i;
1739
1740        for (i = 0; i < MAX_CONTEXT; i++) {
1741                ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1742                /* Disrupt any clients that could be running */
1743                /* e.g. clients that survived a master restart */
1744                writeq_be(0, &ctrl_map->rht_start);
1745                writeq_be(0, &ctrl_map->rht_cnt_id);
1746                writeq_be(0, &ctrl_map->ctx_cap);
1747        }
1748
1749        /* Copy frequently used fields into hwq */
1750        for (i = 0; i < afu->num_hwqs; i++) {
1751                hwq = get_hwq(afu, i);
1752                cookie = hwq->ctx_cookie;
1753
1754                hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1755                hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1756                hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1757
1758                /* Program the Endian Control for the master context */
1759                writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1760        }
1761}
1762
1763/**
1764 * init_global() - initialize AFU global registers
1765 * @cfg:        Internal structure associated with the host.
     *
     * Return: 0 on success, -errno on failure
1766 */
1767static int init_global(struct cxlflash_cfg *cfg)
1768{
1769        struct afu *afu = cfg->afu;
1770        struct device *dev = &cfg->dev->dev;
1771        struct hwq *hwq;
1772        struct sisl_host_map __iomem *hmap;
1773        __be64 __iomem *fc_port_regs;
1774        u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
1775        int i = 0, num_ports = 0;
1776        int rc = 0;
1777        int j;
1778        void *ctx;
1779        u64 reg;
1780
1781        rc = read_vpd(cfg, &wwpn[0]);
1782        if (rc) {
1783                dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1784                goto out;
1785        }
1786
1787        /* Set up RRQ and SQ in HWQ for master issued cmds */
1788        for (i = 0; i < afu->num_hwqs; i++) {
1789                hwq = get_hwq(afu, i);
1790                hmap = hwq->host_map;
1791
1792                writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1793                writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1794                hwq->hrrq_online = true;
1795
1796                if (afu_is_sq_cmd_mode(afu)) {
1797                        writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1798                        writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1799                }
1800        }
1801
1802        /* AFU configuration */
1803        reg = readq_be(&afu->afu_map->global.regs.afu_config);
1804        reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
1805        /* enable all auto retry options and control endianness */
1806        /* leave others at default: */
1807        /* CTX_CAP write protected, mbox_r does not clear on read and */
1808        /* checker on if dual afu */
1809        writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1810
1811        /* Global port select: select either port */
1812        if (afu->internal_lun) {
1813                /* Only use port 0 */
1814                writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1815                num_ports = 0;
1816        } else {
1817                writeq_be(PORT_MASK(cfg->num_fc_ports),
1818                          &afu->afu_map->global.regs.afu_port_sel);
1819                num_ports = cfg->num_fc_ports;
1820        }
1821
1822        for (i = 0; i < num_ports; i++) {
1823                fc_port_regs = get_fc_port_regs(cfg, i);
1824
1825                /* Unmask all errors (but they are still masked at AFU) */
1826                writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1827                /* Clear CRC error cnt & set a threshold */
1828                (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1829                writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1830
1831                /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1832                if (wwpn[i] != 0)
1833                        afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1834                /* Programming WWPN back to back causes additional
1835                 * offline/online transitions and a PLOGI
1836                 */
1837                msleep(100);
1838        }
1839
1840        if (afu_is_ocxl_lisn(afu)) {
1841                /* Set up the LISN effective address for each master */
1842                for (i = 0; i < afu->num_hwqs; i++) {
1843                        hwq = get_hwq(afu, i);
1844                        ctx = hwq->ctx_cookie;
1845
1846                        for (j = 0; j < hwq->num_irqs; j++) {
1847                                reg = cfg->ops->get_irq_objhndl(ctx, j);
1848                                writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1849                        }
1850
1851                        reg = hwq->ctx_hndl;
1852                        writeq_be(SISL_LISN_PASID(reg, reg),
1853                                  &hwq->ctrl_map->lisn_pasid[0]);
1854                        writeq_be(SISL_LISN_PASID(0UL, reg),
1855                                  &hwq->ctrl_map->lisn_pasid[1]);
1856                }
1857        }
1858
1859        /* Set up master's own CTX_CAP to allow real mode, host translation */
1860        /* tables, afu cmds and read/write GSCSI cmds. */
1861        /* First, unlock ctx_cap write by reading mbox */
1862        for (i = 0; i < afu->num_hwqs; i++) {
1863                hwq = get_hwq(afu, i);
1864
1865                (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
1866                writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1867                        SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1868                        SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1869                        &hwq->ctrl_map->ctx_cap);
1870        }
1871
1872        /*
1873         * Determine write-same unmap support for host by evaluating the unmap
1874         * sector support bit of the context control register associated with
1875         * the primary hardware queue. Note that while this status is reflected
1876         * in a context register, the outcome can be assumed to be host-wide.
1877         */
1878        hwq = get_hwq(afu, PRIMARY_HWQ);
1879        reg = readq_be(&hwq->host_map->ctx_ctrl);
1880        if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1881                cfg->ws_unmap = true;
1882
1883        /* Initialize heartbeat */
1884        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1885out:
1886        return rc;
1887}
1888
1889/**
1890 * start_afu() - initializes and starts the AFU
1891 * @cfg:        Internal structure associated with the host.
     *
     * Return: 0 on success, -errno on failure
1892 */
1893static int start_afu(struct cxlflash_cfg *cfg)
1894{
1895        struct afu *afu = cfg->afu;
1896        struct device *dev = &cfg->dev->dev;
1897        struct hwq *hwq;
1898        int rc = 0;
1899        int i;
1900
1901        init_pcr(cfg);
1902
1903        /* Initialize each HWQ */
1904        for (i = 0; i < afu->num_hwqs; i++) {
1905                hwq = get_hwq(afu, i);
1906
1907                /* After an AFU reset, RRQ entries are stale, clear them */
1908                memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1909
1910                /* Initialize RRQ pointers */
1911                hwq->hrrq_start = &hwq->rrq_entry[0];
1912                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1913                hwq->hrrq_curr = hwq->hrrq_start;
1914                hwq->toggle = 1;
1915
1916                /* Initialize spin locks */
1917                spin_lock_init(&hwq->hrrq_slock);
1918                spin_lock_init(&hwq->hsq_slock);
1919
1920                /* Initialize SQ */
1921                if (afu_is_sq_cmd_mode(afu)) {
1922                        memset(&hwq->sq, 0, sizeof(hwq->sq));
1923                        hwq->hsq_start = &hwq->sq[0];
1924                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1925                        hwq->hsq_curr = hwq->hsq_start;
1926
1927                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1928                }
1929
1930                /* Initialize IRQ poll */
1931                if (afu_is_irqpoll_enabled(afu))
1932                        irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1933                                      cxlflash_irqpoll);
1935        }
1936
1937        rc = init_global(cfg);
1938
1939        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1940        return rc;
1941}
1942
1943/**
1944 * init_intr() - setup interrupt handlers for the master context
1945 * @cfg:        Internal structure associated with the host.
1946 * @hwq:        Hardware queue to initialize.
1947 *
1948 * Return: UNDO_NOOP on success, the undo level to unwind on failure
1949 */
1950static enum undo_level init_intr(struct cxlflash_cfg *cfg, struct hwq *hwq)
1952{
1953        struct device *dev = &cfg->dev->dev;
1954        void *ctx = hwq->ctx_cookie;
1955        int rc = 0;
1956        enum undo_level level = UNDO_NOOP;
1957        bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1958        int num_irqs = hwq->num_irqs;
1959
1960        rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1961        if (unlikely(rc)) {
1962                dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1963                        __func__, rc);
1964                level = UNDO_NOOP;
1965                goto out;
1966        }
1967
1968        rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1969                                   "SISL_MSI_SYNC_ERROR");
1970        if (unlikely(rc <= 0)) {
1971                dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1972                level = FREE_IRQ;
1973                goto out;
1974        }
1975
1976        rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1977                                   "SISL_MSI_RRQ_UPDATED");
1978        if (unlikely(rc <= 0)) {
1979                dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1980                level = UNMAP_ONE;
1981                goto out;
1982        }
1983
1984        /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1985        if (!is_primary_hwq)
1986                goto out;
1987
1988        rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1989                                   "SISL_MSI_ASYNC_ERROR");
1990        if (unlikely(rc <= 0)) {
1991                dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1992                level = UNMAP_TWO;
1993                goto out;
1994        }
1995out:
1996        return level;
1997}
1998
1999/**
2000 * init_mc() - create and register as the master context
2001 * @cfg:        Internal structure associated with the host.
2002 * @index:      HWQ index of the master context.
2003 *
2004 * Return: 0 on success, -errno on failure
2005 */
2006static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2007{
2008        void *ctx;
2009        struct device *dev = &cfg->dev->dev;
2010        struct hwq *hwq = get_hwq(cfg->afu, index);
2011        int rc = 0;
2012        int num_irqs;
2013        enum undo_level level;
2014
2015        hwq->afu = cfg->afu;
2016        hwq->index = index;
2017        INIT_LIST_HEAD(&hwq->pending_cmds);
2018
2019        if (index == PRIMARY_HWQ) {
2020                ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2021                num_irqs = 3;
2022        } else {
2023                ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2024                num_irqs = 2;
2025        }
2026        if (IS_ERR_OR_NULL(ctx)) {
2027                rc = -ENOMEM;
2028                goto err1;
2029        }
2030
2031        WARN_ON(hwq->ctx_cookie);
2032        hwq->ctx_cookie = ctx;
2033        hwq->num_irqs = num_irqs;
2034
2035        /* Set it up as a master with the CXL */
2036        cfg->ops->set_master(ctx);
2037
2038        /* Reset AFU when initializing primary context */
2039        if (index == PRIMARY_HWQ) {
2040                rc = cfg->ops->afu_reset(ctx);
2041                if (unlikely(rc)) {
2042                        dev_err(dev, "%s: AFU reset failed rc=%d\n",
2043                                      __func__, rc);
2044                        goto err1;
2045                }
2046        }
2047
2048        level = init_intr(cfg, hwq);
2049        if (unlikely(level)) {
                    rc = -ENODEV;
2050                dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
2051                goto err2;
2052        }
2053
2054        /* Finally, activate the context by starting it */
2055        rc = cfg->ops->start_context(hwq->ctx_cookie);
2056        if (unlikely(rc)) {
2057                dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2058                level = UNMAP_THREE;
2059                goto err2;
2060        }
2061
2062out:
2063        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2064        return rc;
2065err2:
2066        term_intr(cfg, level, index);
2067        if (index != PRIMARY_HWQ)
2068                cfg->ops->release_context(ctx);
2069err1:
2070        hwq->ctx_cookie = NULL;
2071        goto out;
2072}
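
    /*
     * Illustrative sketch: the undo_level values handed to term_intr()
     * implement a staged unwind. Each setup step that succeeds raises the
     * level reported on a later failure, and the terminator (defined
     * elsewhere in this file) conceptually falls through from the level
     * reached:
     *
     *      switch (level) {
     *      case UNMAP_THREE:       // unmap IRQ 3, then fall through
     *      case UNMAP_TWO:         // unmap IRQ 2, then fall through
     *      case UNMAP_ONE:         // unmap IRQ 1, then fall through
     *      case FREE_IRQ:          // free the AFU interrupts
     *      case UNDO_NOOP:         // nothing to undo
     *      }
     */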
2073
2074/**
2075 * get_num_afu_ports() - determines and configures the number of AFU ports
2076 * @cfg:        Internal structure associated with the host.
2077 *
2078 * This routine determines the number of AFU ports by converting the global
2079 * port selection mask. The converted value is only valid following an AFU
2080 * reset (explicit or power-on). This routine must be invoked shortly after
2081 * mapping as other routines are dependent on the number of ports during the
2082 * initialization sequence.
2083 *
2084 * To support legacy AFUs that might not have reflected an initial global
2085 * port mask (value read is 0), default to the number of ports originally
2086 * supported by the cxlflash driver (2) before hardware with other port
2087 * offerings was introduced.
2088 */
2089static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2090{
2091        struct afu *afu = cfg->afu;
2092        struct device *dev = &cfg->dev->dev;
2093        u64 port_mask;
2094        int num_fc_ports = LEGACY_FC_PORTS;
2095
2096        port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2097        if (port_mask != 0ULL)
2098                num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2099
2100        dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2101                __func__, port_mask, num_fc_ports);
2102
2103        cfg->num_fc_ports = num_fc_ports;
2104        cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2105}
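
    /*
     * Illustrative example: with a contiguous selection mask the port count
     * is the index of the highest set bit plus one, so a mask of 0x3 yields
     * ilog2(0x3) + 1 = 2 ports and 0xf yields 4. The min() against
     * MAX_FC_PORTS merely caps a malformed mask.
     */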
2106
2107/**
2108 * init_afu() - setup as master context and start AFU
2109 * @cfg:        Internal structure associated with the host.
2110 *
2111 * This routine is a higher level of control for configuring the
2112 * AFU on probe and reset paths.
2113 *
2114 * Return: 0 on success, -errno on failure
2115 */
2116static int init_afu(struct cxlflash_cfg *cfg)
2117{
2118        u64 reg;
2119        int rc = 0;
2120        struct afu *afu = cfg->afu;
2121        struct device *dev = &cfg->dev->dev;
2122        struct hwq *hwq;
2123        int i;
2124
2125        cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2126
2127        mutex_init(&afu->sync_active);
2128        afu->num_hwqs = afu->desired_hwqs;
2129        for (i = 0; i < afu->num_hwqs; i++) {
2130                rc = init_mc(cfg, i);
2131                if (rc) {
2132                        dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2133                                __func__, rc, i);
2134                        goto err1;
2135                }
2136        }
2137
2138        /* Map the entire MMIO space of the AFU using the first context */
2139        hwq = get_hwq(afu, PRIMARY_HWQ);
2140        afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2141        if (!afu->afu_map) {
2142                dev_err(dev, "%s: psa_map failed\n", __func__);
2143                rc = -ENOMEM;
2144                goto err1;
2145        }
2146
2147        /* No byte reverse on reading afu_version, or the string will be backwards */
2148        reg = readq(&afu->afu_map->global.regs.afu_version);
2149        memcpy(afu->version, &reg, sizeof(reg));
2150        afu->interface_version =
2151            readq_be(&afu->afu_map->global.regs.interface_version);
2152        if ((afu->interface_version + 1) == 0) {
2153                dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2154                        "interface version %016llx\n", afu->version,
2155                       afu->interface_version);
2156                rc = -EINVAL;
2157                goto err1;
2158        }
2159
2160        if (afu_is_sq_cmd_mode(afu)) {
2161                afu->send_cmd = send_cmd_sq;
2162                afu->context_reset = context_reset_sq;
2163        } else {
2164                afu->send_cmd = send_cmd_ioarrin;
2165                afu->context_reset = context_reset_ioarrin;
2166        }
2167
2168        dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2169                afu->version, afu->interface_version);
2170
2171        get_num_afu_ports(cfg);
2172
2173        rc = start_afu(cfg);
2174        if (rc) {
2175                dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2176                goto err1;
2177        }
2178
2179        afu_err_intr_init(cfg->afu);
2180        for (i = 0; i < afu->num_hwqs; i++) {
2181                hwq = get_hwq(afu, i);
2182
2183                hwq->room = readq_be(&hwq->host_map->cmd_room);
2184        }
2185
2186        /* Restore the LUN mappings */
2187        cxlflash_restore_luntable(cfg);
2188out:
2189        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2190        return rc;
2191
2192err1:
2193        for (i = afu->num_hwqs - 1; i >= 0; i--) {
2194                term_intr(cfg, UNMAP_THREE, i);
2195                term_mc(cfg, i);
2196        }
2197        goto out;
2198}
2199
2200/**
2201 * afu_reset() - resets the AFU
2202 * @cfg:        Internal structure associated with the host.
2203 *
2204 * Return: 0 on success, -errno on failure
2205 */
2206static int afu_reset(struct cxlflash_cfg *cfg)
2207{
2208        struct device *dev = &cfg->dev->dev;
2209        int rc = 0;
2210
2211        /* Stop the context before the reset. Since the context is
2212         * no longer available, restart it after the reset is complete.
2213         */
2214        term_afu(cfg);
2215
2216        rc = init_afu(cfg);
2217
2218        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2219        return rc;
2220}
2221
2222/**
2223 * drain_ioctls() - wait until all currently executing ioctls have completed
2224 * @cfg:        Internal structure associated with the host.
2225 *
2226 * Obtain write access to read/write semaphore that wraps ioctl
2227 * handling to 'drain' ioctls currently executing.
2228 */
2229static void drain_ioctls(struct cxlflash_cfg *cfg)
2230{
2231        down_write(&cfg->ioctl_rwsem);
2232        up_write(&cfg->ioctl_rwsem);
2233}
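
    /*
     * Illustrative sketch: ioctl handlers take cfg->ioctl_rwsem for read,
     * so the write acquisition above cannot succeed until every in-flight
     * ioctl has dropped its read hold:
     *
     *      ioctl path                              drain path
     *      down_read(&cfg->ioctl_rwsem);
     *      ... servicing ...                       down_write();   // blocks
     *      up_read(&cfg->ioctl_rwsem);             // last reader out
     *                                              up_write();     // drained
     */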
2234
2235/**
2236 * cxlflash_async_reset_host() - asynchronous host reset handler
2237 * @data:       Private data provided while scheduling reset.
2238 * @cookie:     Cookie that can be used for checkpointing.
2239 */
2240static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2241{
2242        struct cxlflash_cfg *cfg = data;
2243        struct device *dev = &cfg->dev->dev;
2244        int rc = 0;
2245
2246        if (cfg->state != STATE_RESET) {
2247                dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2248                        __func__, cfg->state);
2249                goto out;
2250        }
2251
2252        drain_ioctls(cfg);
2253        cxlflash_mark_contexts_error(cfg);
2254        rc = afu_reset(cfg);
2255        if (rc)
2256                cfg->state = STATE_FAILTERM;
2257        else
2258                cfg->state = STATE_NORMAL;
2259        wake_up_all(&cfg->reset_waitq);
2260
2261out:
2262        scsi_unblock_requests(cfg->host);
2263}
2264
2265/**
2266 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2267 * @cfg:        Internal structure associated with the host.
2268 */
2269static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2270{
2271        struct device *dev = &cfg->dev->dev;
2272
2273        if (cfg->state != STATE_NORMAL) {
2274                dev_dbg(dev, "%s: Not performing reset state=%d\n",
2275                        __func__, cfg->state);
2276                return;
2277        }
2278
2279        cfg->state = STATE_RESET;
2280        scsi_block_requests(cfg->host);
2281        cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2282                                                 cfg);
2283}
2284
2285/**
2286 * send_afu_cmd() - builds and sends an internal AFU command
2287 * @afu:        AFU associated with the host.
2288 * @rcb:        Pre-populated IOARCB describing command to send.
2289 *
2290 * The AFU can only take one internal AFU command at a time. This limitation is
2291 * enforced by using a mutex to provide exclusive access to the AFU during the
2292 * operation. This design point requires calling threads to not be on interrupt
2293 * context due to the possibility of sleeping during concurrent AFU operations.
2294 *
2295 * The command status is optionally passed back to the caller when the caller
2296 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2297 *
2298 * Return:
2299 *      0 on success, -errno on failure
2300 */
2301static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2302{
2303        struct cxlflash_cfg *cfg = afu->parent;
2304        struct device *dev = &cfg->dev->dev;
2305        struct afu_cmd *cmd = NULL;
2306        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2307        ulong lock_flags;
2308        char *buf = NULL;
2309        int rc = 0;
2310        int nretry = 0;
2311
2312        if (cfg->state != STATE_NORMAL) {
2313                dev_dbg(dev, "%s: Command not sent, state=%u\n",
2314                        __func__, cfg->state);
2315                return 0;
2316        }
2317
2318        mutex_lock(&afu->sync_active);
2319        atomic_inc(&afu->cmds_active);
2320        buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2321        if (unlikely(!buf)) {
2322                dev_err(dev, "%s: no memory for command\n", __func__);
2323                rc = -ENOMEM;
2324                goto out;
2325        }
2326
2327        cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2328
2329retry:
2330        memset(cmd, 0, sizeof(*cmd));
2331        memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2332        INIT_LIST_HEAD(&cmd->queue);
2333        init_completion(&cmd->cevent);
2334        cmd->parent = afu;
2335        cmd->hwq_index = hwq->index;
2336        cmd->rcb.ctx_id = hwq->ctx_hndl;
2337
2338        dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2339                __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2340
2341        rc = afu->send_cmd(afu, cmd);
2342        if (unlikely(rc)) {
2343                rc = -ENOBUFS;
2344                goto out;
2345        }
2346
2347        rc = wait_resp(afu, cmd);
2348        switch (rc) {
2349        case -ETIMEDOUT:
2350                rc = afu->context_reset(hwq);
2351                if (rc) {
2352                        /* Delete the command from pending_cmds list */
2353                        spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2354                        list_del(&cmd->list);
2355                        spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2356
2357                        cxlflash_schedule_async_reset(cfg);
2358                        break;
2359                }
2360                /* fall through to retry */
2361        case -EAGAIN:
2362                if (++nretry < 2)
2363                        goto retry;
2364                /* fall through to exit */
2365        default:
2366                break;
2367        }
2368
2369        if (rcb->ioasa)
2370                *rcb->ioasa = cmd->sa;
2371out:
2372        atomic_dec(&afu->cmds_active);
2373        mutex_unlock(&afu->sync_active);
2374        kfree(buf);
2375        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2376        return rc;
2377}
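
    /*
     * Illustrative sketch (not part of the driver): the kmalloc()/PTR_ALIGN()
     * pairing above over-allocates by (alignment - 1) bytes so a naturally
     * aligned command always fits inside the buffer. The raw pointer, not
     * the aligned one, is what must eventually be passed to kfree()
     * (hypothetical helper name):
     */
    static __maybe_unused struct afu_cmd *demo_aligned_cmd(char **raw)
    {
            *raw = kmalloc(sizeof(struct afu_cmd) +
                           __alignof__(struct afu_cmd) - 1, GFP_KERNEL);
            if (unlikely(!*raw))
                    return NULL;

            /* First suitably aligned address at or beyond *raw */
            return PTR_ALIGN((struct afu_cmd *)*raw,
                             __alignof__(struct afu_cmd));
    }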
2378
2379/**
2380 * cxlflash_afu_sync() - builds and sends an AFU sync command
2381 * @afu:        AFU associated with the host.
2382 * @ctx:        Identifies context requesting sync.
2383 * @res:        Identifies resource requesting sync.
2384 * @mode:       Type of sync to issue (lightweight, heavyweight, global).
2385 *
2386 * AFU sync operations are only necessary and allowed when the device is
2387 * operating normally. When not operating normally, sync requests can occur as
2388 * part of cleaning up resources associated with an adapter prior to removal.
2389 * In this scenario, these requests are simply ignored (safe due to the AFU
2390 * going away).
2391 *
2392 * Return:
2393 *      0 on success, -errno on failure
2394 */
2395int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2396{
2397        struct cxlflash_cfg *cfg = afu->parent;
2398        struct device *dev = &cfg->dev->dev;
2399        struct sisl_ioarcb rcb = { 0 };
2400
2401        dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2402                __func__, afu, ctx, res, mode);
2403
2404        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2405        rcb.msi = SISL_MSI_RRQ_UPDATED;
2406        rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2407
2408        rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2409        rcb.cdb[1] = mode;
2410        put_unaligned_be16(ctx, &rcb.cdb[2]);
2411        put_unaligned_be32(res, &rcb.cdb[4]);
2412
2413        return send_afu_cmd(afu, &rcb);
2414}
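
    /*
     * Illustrative example: put_unaligned_be16/32() store the values
     * big-endian regardless of host byte order, so for ctx = 0x1234 and
     * res = 0xdeadbeef the CDB built above lays out as
     *
     *      cdb[0] = SISL_AFU_CMD_SYNC      cdb[2..3] = 12 34
     *      cdb[1] = mode                   cdb[4..7] = de ad be ef
     */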
2415
2416/**
2417 * cxlflash_eh_abort_handler() - abort a SCSI command
2418 * @scp:        SCSI command to abort.
2419 *
2420 * CXL Flash devices do not support a single command abort. Reset the context
2421 * as per SISLite specification. Flush any pending commands in the hardware
2422 * queue before the reset.
2423 *
2424 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2425 */
2426static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2427{
2428        int rc = FAILED;
2429        struct Scsi_Host *host = scp->device->host;
2430        struct cxlflash_cfg *cfg = shost_priv(host);
2431        struct afu_cmd *cmd = sc_to_afuc(scp);
2432        struct device *dev = &cfg->dev->dev;
2433        struct afu *afu = cfg->afu;
2434        struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2435
2436        dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2437                "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2438                scp->device->channel, scp->device->id, scp->device->lun,
2439                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2440                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2441                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2442                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2443
2444        /* When the state is not normal, another reset/reload is in progress.
2445         * Return failed and the mid-layer will invoke host reset handler.
2446         */
2447        if (cfg->state != STATE_NORMAL) {
2448                dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2449                        __func__, cfg->state);
2450                goto out;
2451        }
2452
2453        rc = afu->context_reset(hwq);
2454        if (unlikely(rc))
2455                goto out;
2456
2457        rc = SUCCESS;
2458
2459out:
2460        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2461        return rc;
2462}
2463
2464/**
2465 * cxlflash_eh_device_reset_handler() - reset a single LUN
2466 * @scp:        SCSI command to send.
2467 *
2468 * Return:
2469 *      SUCCESS as defined in scsi/scsi.h
2470 *      FAILED as defined in scsi/scsi.h
2471 */
2472static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2473{
2474        int rc = SUCCESS;
2475        struct scsi_device *sdev = scp->device;
2476        struct Scsi_Host *host = sdev->host;
2477        struct cxlflash_cfg *cfg = shost_priv(host);
2478        struct device *dev = &cfg->dev->dev;
2479        int rcr = 0;
2480
2481        dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2482                host->host_no, sdev->channel, sdev->id, sdev->lun);
2483retry:
2484        switch (cfg->state) {
2485        case STATE_NORMAL:
2486                rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2487                if (unlikely(rcr))
2488                        rc = FAILED;
2489                break;
2490        case STATE_RESET:
2491                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2492                goto retry;
2493        default:
2494                rc = FAILED;
2495                break;
2496        }
2497
2498        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2499        return rc;
2500}
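
    /*
     * Illustrative note: the STATE_RESET arm above is the wait-out-a-reset
     * idiom used throughout this driver:
     *
     *      wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
     *
     * sleeps until wake_up_all(&cfg->reset_waitq) fires with the state
     * moved off STATE_RESET, after which the handler re-evaluates.
     */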
2501
2502/**
2503 * cxlflash_eh_host_reset_handler() - reset the host adapter
2504 * @scp:        SCSI command from stack identifying host.
2505 *
2506 * Following a reset, the state is evaluated again in case an EEH occurred
2507 * during the reset. In such a scenario, the host reset will either yield
2508 * until the EEH recovery is complete or return success or failure based
2509 * upon the current device state.
2510 *
2511 * Return:
2512 *      SUCCESS as defined in scsi/scsi.h
2513 *      FAILED as defined in scsi/scsi.h
2514 */
2515static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2516{
2517        int rc = SUCCESS;
2518        int rcr = 0;
2519        struct Scsi_Host *host = scp->device->host;
2520        struct cxlflash_cfg *cfg = shost_priv(host);
2521        struct device *dev = &cfg->dev->dev;
2522
2523        dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2524
2525        switch (cfg->state) {
2526        case STATE_NORMAL:
2527                cfg->state = STATE_RESET;
2528                drain_ioctls(cfg);
2529                cxlflash_mark_contexts_error(cfg);
2530                rcr = afu_reset(cfg);
2531                if (rcr) {
2532                        rc = FAILED;
2533                        cfg->state = STATE_FAILTERM;
2534                } else
2535                        cfg->state = STATE_NORMAL;
2536                wake_up_all(&cfg->reset_waitq);
2537                ssleep(1);
2538                /* fall through */
2539        case STATE_RESET:
2540                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2541                if (cfg->state == STATE_NORMAL)
2542                        break;
2543                /* fall through */
2544        default:
2545                rc = FAILED;
2546                break;
2547        }
2548
2549        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2550        return rc;
2551}
2552
2553/**
2554 * cxlflash_change_queue_depth() - change the queue depth for the device
2555 * @sdev:       SCSI device destined for queue depth change.
2556 * @qdepth:     Requested queue depth value to set.
2557 *
2558 * The requested queue depth is capped to the maximum supported value.
2559 *
2560 * Return: The actual queue depth set.
2561 */
2562static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2563{
2565        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2566                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2567
2568        scsi_change_queue_depth(sdev, qdepth);
2569        return sdev->queue_depth;
2570}
2571
2572/**
2573 * cxlflash_show_port_status() - queries and presents the current port status
2574 * @port:       Desired port for status reporting.
2575 * @cfg:        Internal structure associated with the host.
2576 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2577 *
2578 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2579 */
2580static ssize_t cxlflash_show_port_status(u32 port,
2581                                         struct cxlflash_cfg *cfg,
2582                                         char *buf)
2583{
2584        struct device *dev = &cfg->dev->dev;
2585        char *disp_status;
2586        u64 status;
2587        __be64 __iomem *fc_port_regs;
2588
2589        WARN_ON(port >= MAX_FC_PORTS);
2590
2591        if (port >= cfg->num_fc_ports) {
2592                dev_info(dev, "%s: Port %d not supported on this card.\n",
2593                        __func__, port);
2594                return -EINVAL;
2595        }
2596
2597        fc_port_regs = get_fc_port_regs(cfg, port);
2598        status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2599        status &= FC_MTIP_STATUS_MASK;
2600
2601        if (status == FC_MTIP_STATUS_ONLINE)
2602                disp_status = "online";
2603        else if (status == FC_MTIP_STATUS_OFFLINE)
2604                disp_status = "offline";
2605        else
2606                disp_status = "unknown";
2607
2608        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2609}
2610
2611/**
2612 * port0_show() - queries and presents the current status of port 0
2613 * @dev:        Generic device associated with the host owning the port.
2614 * @attr:       Device attribute representing the port.
2615 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2616 *
2617 * Return: The size of the ASCII string returned in @buf.
2618 */
2619static ssize_t port0_show(struct device *dev,
2620                          struct device_attribute *attr,
2621                          char *buf)
2622{
2623        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2624
2625        return cxlflash_show_port_status(0, cfg, buf);
2626}
2627
2628/**
2629 * port1_show() - queries and presents the current status of port 1
2630 * @dev:        Generic device associated with the host owning the port.
2631 * @attr:       Device attribute representing the port.
2632 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2633 *
2634 * Return: The size of the ASCII string returned in @buf.
2635 */
2636static ssize_t port1_show(struct device *dev,
2637                          struct device_attribute *attr,
2638                          char *buf)
2639{
2640        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2641
2642        return cxlflash_show_port_status(1, cfg, buf);
2643}
2644
2645/**
2646 * port2_show() - queries and presents the current status of port 2
2647 * @dev:        Generic device associated with the host owning the port.
2648 * @attr:       Device attribute representing the port.
2649 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2650 *
2651 * Return: The size of the ASCII string returned in @buf.
2652 */
2653static ssize_t port2_show(struct device *dev,
2654                          struct device_attribute *attr,
2655                          char *buf)
2656{
2657        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2658
2659        return cxlflash_show_port_status(2, cfg, buf);
2660}
2661
2662/**
2663 * port3_show() - queries and presents the current status of port 3
2664 * @dev:        Generic device associated with the host owning the port.
2665 * @attr:       Device attribute representing the port.
2666 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2667 *
2668 * Return: The size of the ASCII string returned in @buf.
2669 */
2670static ssize_t port3_show(struct device *dev,
2671                          struct device_attribute *attr,
2672                          char *buf)
2673{
2674        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2675
2676        return cxlflash_show_port_status(3, cfg, buf);
2677}
2678
2679/**
2680 * lun_mode_show() - presents the current LUN mode of the host
2681 * @dev:        Generic device associated with the host.
2682 * @attr:       Device attribute representing the LUN mode.
2683 * @buf:        Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2684 *
2685 * Return: The size of the ASCII string returned in @buf.
2686 */
2687static ssize_t lun_mode_show(struct device *dev,
2688                             struct device_attribute *attr, char *buf)
2689{
2690        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2691        struct afu *afu = cfg->afu;
2692
2693        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2694}
2695
2696/**
2697 * lun_mode_store() - sets the LUN mode of the host
2698 * @dev:        Generic device associated with the host.
2699 * @attr:       Device attribute representing the LUN mode.
2700 * @buf:        Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2701 * @count:      Length of data residing in @buf.
2702 *
2703 * The CXL Flash AFU supports a dummy LUN mode where the external
2704 * links and storage are not required. Space on the FPGA is used
2705 * to create 1 or 2 small LUNs which are presented to the system
2706 * as if they were a normal storage device. This feature is useful
2707 * during development and also provides manufacturing with a way
2708 * to test the AFU without an actual device.
2709 *
2710 * 0 = external LUN[s] (default)
2711 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2712 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2713 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2714 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2715 *
2716 * Return: The number of bytes consumed from @buf, i.e. @count.
2717 */
2718static ssize_t lun_mode_store(struct device *dev,
2719                              struct device_attribute *attr,
2720                              const char *buf, size_t count)
2721{
2722        struct Scsi_Host *shost = class_to_shost(dev);
2723        struct cxlflash_cfg *cfg = shost_priv(shost);
2724        struct afu *afu = cfg->afu;
2725        int rc;
2726        u32 lun_mode;
2727
2728        rc = kstrtouint(buf, 10, &lun_mode);
2729        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2730                afu->internal_lun = lun_mode;
2731
2732                /*
2733                 * When configured for internal LUN, there is only one channel,
2734                 * channel number 0; otherwise the highest channel number is
2735                 * one less than the number of fc ports for this card.
2736                 */
2737                if (afu->internal_lun)
2738                        shost->max_channel = 0;
2739                else
2740                        shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2741
2742                afu_reset(cfg);
2743                scsi_scan_host(cfg->host);
2744        }
2745
2746        return count;
2747}
2748
2749/**
2750 * ioctl_version_show() - presents the current ioctl version of the host
2751 * @dev:        Generic device associated with the host.
2752 * @attr:       Device attribute representing the ioctl version.
2753 * @buf:        Buffer of length PAGE_SIZE to report back the ioctl version.
2754 *
2755 * Return: The size of the ASCII string returned in @buf.
2756 */
2757static ssize_t ioctl_version_show(struct device *dev,
2758                                  struct device_attribute *attr, char *buf)
2759{
2760        ssize_t bytes = 0;
2761
2762        bytes = scnprintf(buf, PAGE_SIZE,
2763                          "disk: %u\n", DK_CXLFLASH_VERSION_0);
2764        bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2765                           "host: %u\n", HT_CXLFLASH_VERSION_0);
2766
2767        return bytes;
2768}
2769
2770/**
2771 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2772 * @port:       Desired port for status reporting.
2773 * @cfg:        Internal structure associated with the host.
2774 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2775 *
2776 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2777 */
2778static ssize_t cxlflash_show_port_lun_table(u32 port,
2779                                            struct cxlflash_cfg *cfg,
2780                                            char *buf)
2781{
2782        struct device *dev = &cfg->dev->dev;
2783        __be64 __iomem *fc_port_luns;
2784        int i;
2785        ssize_t bytes = 0;
2786
2787        WARN_ON(port >= MAX_FC_PORTS);
2788
2789        if (port >= cfg->num_fc_ports) {
2790                dev_info(dev, "%s: Port %d not supported on this card.\n",
2791                        __func__, port);
2792                return -EINVAL;
2793        }
2794
2795        fc_port_luns = get_fc_port_luns(cfg, port);
2796
2797        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2798                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2799                                   "%03d: %016llx\n",
2800                                   i, readq_be(&fc_port_luns[i]));
2801        return bytes;
2802}
2803
2804/**
2805 * port0_lun_table_show() - presents the current LUN table of port 0
2806 * @dev:        Generic device associated with the host owning the port.
2807 * @attr:       Device attribute representing the port.
2808 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2809 *
2810 * Return: The size of the ASCII string returned in @buf.
2811 */
2812static ssize_t port0_lun_table_show(struct device *dev,
2813                                    struct device_attribute *attr,
2814                                    char *buf)
2815{
2816        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2817
2818        return cxlflash_show_port_lun_table(0, cfg, buf);
2819}
2820
2821/**
2822 * port1_lun_table_show() - presents the current LUN table of port 1
2823 * @dev:        Generic device associated with the host owning the port.
2824 * @attr:       Device attribute representing the port.
2825 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2826 *
2827 * Return: The size of the ASCII string returned in @buf.
2828 */
2829static ssize_t port1_lun_table_show(struct device *dev,
2830                                    struct device_attribute *attr,
2831                                    char *buf)
2832{
2833        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2834
2835        return cxlflash_show_port_lun_table(1, cfg, buf);
2836}
2837
2838/**
2839 * port2_lun_table_show() - presents the current LUN table of port 2
2840 * @dev:        Generic device associated with the host owning the port.
2841 * @attr:       Device attribute representing the port.
2842 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2843 *
2844 * Return: The size of the ASCII string returned in @buf.
2845 */
2846static ssize_t port2_lun_table_show(struct device *dev,
2847                                    struct device_attribute *attr,
2848                                    char *buf)
2849{
2850        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2851
2852        return cxlflash_show_port_lun_table(2, cfg, buf);
2853}
2854
2855/**
2856 * port3_lun_table_show() - presents the current LUN table of port 3
2857 * @dev:        Generic device associated with the host owning the port.
2858 * @attr:       Device attribute representing the port.
2859 * @buf:        Buffer of length PAGE_SIZE to report back port status in ASCII.
2860 *
2861 * Return: The size of the ASCII string returned in @buf.
2862 */
2863static ssize_t port3_lun_table_show(struct device *dev,
2864                                    struct device_attribute *attr,
2865                                    char *buf)
2866{
2867        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2868
2869        return cxlflash_show_port_lun_table(3, cfg, buf);
2870}
2871
2872/**
2873 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2874 * @dev:        Generic device associated with the host.
2875 * @attr:       Device attribute representing the IRQ poll weight.
2876 * @buf:        Buffer of length PAGE_SIZE to report back the current IRQ poll
2877 *              weight in ASCII.
2878 *
2879 * An IRQ poll weight of 0 indicates polling is disabled.
2880 *
2881 * Return: The size of the ASCII string returned in @buf.
2882 */
2883static ssize_t irqpoll_weight_show(struct device *dev,
2884                                   struct device_attribute *attr, char *buf)
2885{
2886        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2887        struct afu *afu = cfg->afu;
2888
2889        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2890}
2891
2892/**
2893 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2894 * @dev:        Generic device associated with the host.
2895 * @attr:       Device attribute representing the IRQ poll weight.
2896 * @buf:        Buffer of length PAGE_SIZE containing the desired IRQ poll
2897 *              weight in ASCII.
2898 * @count:      Length of data residing in @buf.
2899 *
2900 * An IRQ poll weight of 0 indicates polling is disabled.
2901 *
2902 * Return: @count on success, -EINVAL on failure.
2903 */
2904static ssize_t irqpoll_weight_store(struct device *dev,
2905                                    struct device_attribute *attr,
2906                                    const char *buf, size_t count)
2907{
2908        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2909        struct device *cfgdev = &cfg->dev->dev;
2910        struct afu *afu = cfg->afu;
2911        struct hwq *hwq;
2912        u32 weight;
2913        int rc, i;
2914
2915        rc = kstrtouint(buf, 10, &weight);
2916        if (rc)
2917                return -EINVAL;
2918
2919        if (weight > 256) {
2920                dev_info(cfgdev,
2921                         "Invalid IRQ poll weight. It must be 256 or less.\n");
2922                return -EINVAL;
2923        }
2924
2925        if (weight == afu->irqpoll_weight) {
2926                dev_info(cfgdev,
2927                         "Requested IRQ poll weight matches the current weight.\n");
2928                return -EINVAL;
2929        }
2930
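        /* Quiesce any existing polling before the weight is changed */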
2931        if (afu_is_irqpoll_enabled(afu)) {
2932                for (i = 0; i < afu->num_hwqs; i++) {
2933                        hwq = get_hwq(afu, i);
2934
2935                        irq_poll_disable(&hwq->irqpoll);
2936                }
2937        }
2938
2939        afu->irqpoll_weight = weight;
2940
2941        if (weight > 0) {
2942                for (i = 0; i < afu->num_hwqs; i++) {
2943                        hwq = get_hwq(afu, i);
2944
2945                        irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2946                }
2947        }
2948
2949        return count;
2950}
2951
2952/**
2953 * num_hwqs_show() - presents the number of hardware queues for the host
2954 * @dev:        Generic device associated with the host.
2955 * @attr:       Device attribute representing the number of hardware queues.
2956 * @buf:        Buffer of length PAGE_SIZE to report back the number of hardware
2957 *              queues in ASCII.
2958 *
2959 * Return: The size of the ASCII string returned in @buf.
2960 */
2961static ssize_t num_hwqs_show(struct device *dev,
2962                             struct device_attribute *attr, char *buf)
2963{
2964        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2965        struct afu *afu = cfg->afu;
2966
2967        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2968}
2969
2970/**
2971 * num_hwqs_store() - sets the number of hardware queues for the host
2972 * @dev:        Generic device associated with the host.
2973 * @attr:       Device attribute representing the number of hardware queues.
2974 * @buf:        Buffer of length PAGE_SIZE containing the number of hardware
2975 *              queues in ASCII.
 * @count:      Length of data residing in @buf.
2977 *
2978 * n > 0: num_hwqs = n
2979 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2981 *
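 * Illustrative usage sketch (host number varies by system):
 *
 *   echo 4 > /sys/class/scsi_host/host0/num_hwqs     # four queues
 *   echo -2 > /sys/class/scsi_host/host0/num_hwqs    # half the online CPUs
 *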
 * Return: @count on success, -errno on failure.
2983 */
2984static ssize_t num_hwqs_store(struct device *dev,
2985                              struct device_attribute *attr,
2986                              const char *buf, size_t count)
2987{
2988        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2989        struct afu *afu = cfg->afu;
2990        int rc;
2991        int nhwqs, num_hwqs;
2992
2993        rc = kstrtoint(buf, 10, &nhwqs);
2994        if (rc)
2995                return -EINVAL;
2996
2997        if (nhwqs >= 1)
2998                num_hwqs = nhwqs;
2999        else if (nhwqs == 0)
3000                num_hwqs = num_online_cpus();
3001        else
3002                num_hwqs = num_online_cpus() / abs(nhwqs);
3003
3004        afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3005        WARN_ON_ONCE(afu->desired_hwqs == 0);
3006
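        /*
         * Applying a new queue count requires an AFU reset. If a reset is
         * already in flight, wait for it to settle and then retry.
         */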
3007retry:
3008        switch (cfg->state) {
3009        case STATE_NORMAL:
3010                cfg->state = STATE_RESET;
3011                drain_ioctls(cfg);
3012                cxlflash_mark_contexts_error(cfg);
3013                rc = afu_reset(cfg);
3014                if (rc)
3015                        cfg->state = STATE_FAILTERM;
3016                else
3017                        cfg->state = STATE_NORMAL;
3018                wake_up_all(&cfg->reset_waitq);
3019                break;
3020        case STATE_RESET:
3021                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3022                if (cfg->state == STATE_NORMAL)
3023                        goto retry;
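                /* else, fall through */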
3024        default:
3025                /* Ideally should not happen */
3026                dev_err(dev, "%s: Device is not ready, state=%d\n",
3027                        __func__, cfg->state);
3028                break;
3029        }
3030
3031        return count;
3032}
3033
3034static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3035
3036/**
3037 * hwq_mode_show() - presents the HWQ steering mode for the host
3038 * @dev:        Generic device associated with the host.
3039 * @attr:       Device attribute representing the HWQ steering mode.
3040 * @buf:        Buffer of length PAGE_SIZE to report back the HWQ steering mode
3041 *              as a character string.
3042 *
3043 * Return: The size of the ASCII string returned in @buf.
3044 */
3045static ssize_t hwq_mode_show(struct device *dev,
3046                             struct device_attribute *attr, char *buf)
3047{
3048        struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3049        struct afu *afu = cfg->afu;
3050
3051        return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3052}
3053
3054/**
3055 * hwq_mode_store() - sets the HWQ steering mode for the host
3056 * @dev:        Generic device associated with the host.
3057 * @attr:       Device attribute representing the HWQ steering mode.
3058 * @buf:        Buffer of length PAGE_SIZE containing the HWQ steering mode
3059 *              as a character string.
 * @count:      Length of data residing in @buf.
3061 *
3062 * rr = Round-Robin
3063 * tag = Block MQ Tagging
3064 * cpu = CPU Affinity
3065 *
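 * Illustrative usage sketch (host number varies by system):
 *
 *   echo cpu > /sys/class/scsi_host/host0/hwq_mode
 *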
 * Return: @count on success, -errno on failure.
3067 */
3068static ssize_t hwq_mode_store(struct device *dev,
3069                              struct device_attribute *attr,
3070                              const char *buf, size_t count)
3071{
3072        struct Scsi_Host *shost = class_to_shost(dev);
3073        struct cxlflash_cfg *cfg = shost_priv(shost);
3074        struct device *cfgdev = &cfg->dev->dev;
3075        struct afu *afu = cfg->afu;
3076        int i;
3077        u32 mode = MAX_HWQ_MODE;
3078
3079        for (i = 0; i < MAX_HWQ_MODE; i++) {
3080                if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3081                        mode = i;
3082                        break;
3083                }
3084        }
3085
3086        if (mode >= MAX_HWQ_MODE) {
3087                dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3088                return -EINVAL;
3089        }
3090
3091        afu->hwq_mode = mode;
3092
3093        return count;
3094}
3095
3096/**
3097 * mode_show() - presents the current mode of the device
3098 * @dev:        Generic device associated with the device.
3099 * @attr:       Device attribute representing the device mode.
3100 * @buf:        Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3101 *
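 * A device reports "superpipe" when user context data is attached to it
 * (sdev->hostdata is populated) and "legacy" otherwise.
 *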
3102 * Return: The size of the ASCII string returned in @buf.
3103 */
3104static ssize_t mode_show(struct device *dev,
3105                         struct device_attribute *attr, char *buf)
3106{
3107        struct scsi_device *sdev = to_scsi_device(dev);
3108
3109        return scnprintf(buf, PAGE_SIZE, "%s\n",
3110                         sdev->hostdata ? "superpipe" : "legacy");
3111}
3112
3113/*
3114 * Host attributes
3115 */
3116static DEVICE_ATTR_RO(port0);
3117static DEVICE_ATTR_RO(port1);
3118static DEVICE_ATTR_RO(port2);
3119static DEVICE_ATTR_RO(port3);
3120static DEVICE_ATTR_RW(lun_mode);
3121static DEVICE_ATTR_RO(ioctl_version);
3122static DEVICE_ATTR_RO(port0_lun_table);
3123static DEVICE_ATTR_RO(port1_lun_table);
3124static DEVICE_ATTR_RO(port2_lun_table);
3125static DEVICE_ATTR_RO(port3_lun_table);
3126static DEVICE_ATTR_RW(irqpoll_weight);
3127static DEVICE_ATTR_RW(num_hwqs);
3128static DEVICE_ATTR_RW(hwq_mode);
3129
3130static struct device_attribute *cxlflash_host_attrs[] = {
3131        &dev_attr_port0,
3132        &dev_attr_port1,
3133        &dev_attr_port2,
3134        &dev_attr_port3,
3135        &dev_attr_lun_mode,
3136        &dev_attr_ioctl_version,
3137        &dev_attr_port0_lun_table,
3138        &dev_attr_port1_lun_table,
3139        &dev_attr_port2_lun_table,
3140        &dev_attr_port3_lun_table,
3141        &dev_attr_irqpoll_weight,
3142        &dev_attr_num_hwqs,
3143        &dev_attr_hwq_mode,
3144        NULL
3145};
3146
3147/*
3148 * Device attributes
3149 */
3150static DEVICE_ATTR_RO(mode);
3151
3152static struct device_attribute *cxlflash_dev_attrs[] = {
3153        &dev_attr_mode,
3154        NULL
3155};
3156
3157/*
3158 * Host template
3159 */
3160static struct scsi_host_template driver_template = {
3161        .module = THIS_MODULE,
3162        .name = CXLFLASH_ADAPTER_NAME,
3163        .info = cxlflash_driver_info,
3164        .ioctl = cxlflash_ioctl,
3165        .proc_name = CXLFLASH_NAME,
3166        .queuecommand = cxlflash_queuecommand,
3167        .eh_abort_handler = cxlflash_eh_abort_handler,
3168        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3169        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3170        .change_queue_depth = cxlflash_change_queue_depth,
3171        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3172        .can_queue = CXLFLASH_MAX_CMDS,
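        /* Per-command private area, padded so the afu_cmd can be aligned */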
3173        .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3174        .this_id = -1,
3175        .sg_tablesize = 1,      /* No scatter gather support */
3176        .max_sectors = CXLFLASH_MAX_SECTORS,
3177        .use_clustering = ENABLE_CLUSTERING,
3178        .shost_attrs = cxlflash_host_attrs,
3179        .sdev_attrs = cxlflash_dev_attrs,
3180};
3181
3182/*
3183 * Device dependent values
3184 */
3185static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3186                                        CXLFLASH_WWPN_VPD_REQUIRED };
3187static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3188                                        CXLFLASH_NOTIFY_SHUTDOWN };
3189static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3190                                        (CXLFLASH_NOTIFY_SHUTDOWN |
3191                                        CXLFLASH_OCXL_DEV) };
3192
3193/*
3194 * PCI device binding table
3195 */
3196static struct pci_device_id cxlflash_pci_table[] = {
3197        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3198         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3199        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3200         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3201        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3202         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3203        {}
3204};
3205
3206MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3207
3208/**
3209 * cxlflash_worker_thread() - work thread handler for the AFU
3210 * @work:       Work structure contained within cxlflash associated with host.
3211 *
3212 * Handles the following events:
 * - Link reset, which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
3215 * - Rescan the host
3216 */
3217static void cxlflash_worker_thread(struct work_struct *work)
3218{
3219        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3220                                                work_q);
3221        struct afu *afu = cfg->afu;
3222        struct device *dev = &cfg->dev->dev;
3223        __be64 __iomem *fc_port_regs;
3224        int port;
3225        ulong lock_flags;
3226
3227        /* Avoid MMIO if the device has failed */
3229        if (cfg->state != STATE_NORMAL)
3230                return;
3231
3232        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3233
3234        if (cfg->lr_state == LINK_RESET_REQUIRED) {
3235                port = cfg->lr_port;
                if (port < 0) {
                        dev_err(dev, "%s: invalid port index %d\n",
                                __func__, port);
                } else {
3240                        spin_unlock_irqrestore(cfg->host->host_lock,
3241                                               lock_flags);
3242
3243                        /* The reset can block... */
3244                        fc_port_regs = get_fc_port_regs(cfg, port);
3245                        afu_link_reset(afu, port, fc_port_regs);
3246                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3247                }
3248
3249                cfg->lr_state = LINK_RESET_COMPLETE;
3250        }
3251
3252        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3253
3254        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3255                scsi_scan_host(cfg->host);
3256}
3257
3258/**
3259 * cxlflash_chr_open() - character device open handler
3260 * @inode:      Device inode associated with this character device.
3261 * @file:       File pointer for this device.
3262 *
3263 * Only users with admin privileges are allowed to open the character device.
3264 *
3265 * Return: 0 on success, -errno on failure
3266 */
3267static int cxlflash_chr_open(struct inode *inode, struct file *file)
3268{
3269        struct cxlflash_cfg *cfg;
3270
3271        if (!capable(CAP_SYS_ADMIN))
3272                return -EACCES;
3273
3274        cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3275        file->private_data = cfg;
3276
3277        return 0;
3278}
3279
3280/**
3281 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3282 * @cmd:        The host ioctl command to decode.
3283 *
3284 * Return: A string identifying the decoded host ioctl.
3285 */
3286static char *decode_hioctl(int cmd)
3287{
3288        switch (cmd) {
3289        case HT_CXLFLASH_LUN_PROVISION:
3290                return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3291        }
3292
3293        return "UNKNOWN";
3294}
3295
3296/**
3297 * cxlflash_lun_provision() - host LUN provisioning handler
3298 * @cfg:        Internal structure associated with the host.
 * @lunprov:    Kernel copy of userspace ioctl data structure.
3300 *
3301 * Return: 0 on success, -errno on failure
3302 */
3303static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3304                                  struct ht_cxlflash_lun_provision *lunprov)
3305{
3306        struct afu *afu = cfg->afu;
3307        struct device *dev = &cfg->dev->dev;
3308        struct sisl_ioarcb rcb;
3309        struct sisl_ioasa asa;
3310        __be64 __iomem *fc_port_regs;
3311        u16 port = lunprov->port;
3312        u16 scmd = lunprov->hdr.subcmd;
3313        u16 type;
3314        u64 reg;
3315        u64 size;
3316        u64 lun_id;
3317        int rc = 0;
3318
3319        if (!afu_is_lun_provision(afu)) {
3320                rc = -ENOTSUPP;
3321                goto out;
3322        }
3323
3324        if (port >= cfg->num_fc_ports) {
3325                rc = -EINVAL;
3326                goto out;
3327        }
3328
3329        switch (scmd) {
3330        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3331                type = SISL_AFU_LUN_PROVISION_CREATE;
3332                size = lunprov->size;
3333                lun_id = 0;
3334                break;
3335        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3336                type = SISL_AFU_LUN_PROVISION_DELETE;
3337                size = 0;
3338                lun_id = lunprov->lun_id;
3339                break;
3340        case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3341                fc_port_regs = get_fc_port_regs(cfg, port);
3342
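                /* The FC_* macros are byte offsets into a __be64-indexed map */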
3343                reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3344                lunprov->max_num_luns = reg;
3345                reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3346                lunprov->cur_num_luns = reg;
3347                reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3348                lunprov->max_cap_port = reg;
3349                reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3350                lunprov->cur_cap_port = reg;
3351
3352                goto out;
3353        default:
3354                rc = -EINVAL;
3355                goto out;
3356        }
3357
3358        memset(&rcb, 0, sizeof(rcb));
3359        memset(&asa, 0, sizeof(asa));
3360        rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3361        rcb.lun_id = lun_id;
3362        rcb.msi = SISL_MSI_RRQ_UPDATED;
3363        rcb.timeout = MC_LUN_PROV_TIMEOUT;
3364        rcb.ioasa = &asa;
3365
3366        rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3367        rcb.cdb[1] = type;
3368        rcb.cdb[2] = port;
3369        put_unaligned_be64(size, &rcb.cdb[8]);
3370
3371        rc = send_afu_cmd(afu, &rcb);
3372        if (rc) {
3373                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3374                        __func__, rc, asa.ioasc, asa.afu_extra);
3375                goto out;
3376        }
3377
3378        if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3379                lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3380                memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3381        }
3382out:
3383        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3384        return rc;
3385}
3386
3387/**
3388 * cxlflash_afu_debug() - host AFU debug handler
3389 * @cfg:        Internal structure associated with the host.
 * @afu_dbg:    Kernel copy of userspace ioctl data structure.
3391 *
3392 * For debug requests requiring a data buffer, always provide an aligned
3393 * (cache line) buffer to the AFU to appease any alignment requirements.
3394 *
3395 * Return: 0 on success, -errno on failure
3396 */
3397static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3398                              struct ht_cxlflash_afu_debug *afu_dbg)
3399{
3400        struct afu *afu = cfg->afu;
3401        struct device *dev = &cfg->dev->dev;
3402        struct sisl_ioarcb rcb;
3403        struct sisl_ioasa asa;
3404        char *buf = NULL;
3405        char *kbuf = NULL;
3406        void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3407        u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3408        u32 ulen = afu_dbg->data_len;
3409        bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3410        int rc = 0;
3411
3412        if (!afu_is_afu_debug(afu)) {
3413                rc = -ENOTSUPP;
3414                goto out;
3415        }
3416
3417        if (ulen) {
3418                req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3419
3420                if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3421                        rc = -EINVAL;
3422                        goto out;
3423                }
3424
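                /*
                 * Over-allocate by one cache line less a byte so that
                 * PTR_ALIGN() below can return a cache-line-aligned
                 * pointer within the allocation.
                 */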
3425                buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3426                if (unlikely(!buf)) {
3427                        rc = -ENOMEM;
3428                        goto out;
3429                }
3430
3431                kbuf = PTR_ALIGN(buf, cache_line_size());
3432
3433                if (is_write) {
3434                        req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3435
3436                        if (copy_from_user(kbuf, ubuf, ulen)) {
3437                                rc = -EFAULT;
3438                                goto out;
3439                        }
3440                }
3441        }
3442
3443        memset(&rcb, 0, sizeof(rcb));
3444        memset(&asa, 0, sizeof(asa));
3445
3446        rcb.req_flags = req_flags;
3447        rcb.msi = SISL_MSI_RRQ_UPDATED;
3448        rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3449        rcb.ioasa = &asa;
3450
3451        if (ulen) {
3452                rcb.data_len = ulen;
3453                rcb.data_ea = (uintptr_t)kbuf;
3454        }
3455
3456        rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3457        memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3458               HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3459
3460        rc = send_afu_cmd(afu, &rcb);
3461        if (rc) {
3462                dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3463                        __func__, rc, asa.ioasc, asa.afu_extra);
3464                goto out;
3465        }
3466
3467        if (ulen && !is_write) {
3468                if (copy_to_user(ubuf, kbuf, ulen))
3469                        rc = -EFAULT;
3470        }
3471out:
3472        kfree(buf);
3473        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3474        return rc;
3475}
3476
3477/**
3478 * cxlflash_chr_ioctl() - character device IOCTL handler
3479 * @file:       File pointer for this device.
3480 * @cmd:        IOCTL command.
3481 * @arg:        Userspace ioctl data structure.
3482 *
3483 * A read/write semaphore is used to implement a 'drain' of currently
3484 * running ioctls. The read semaphore is taken at the beginning of each
3485 * ioctl thread and released upon concluding execution. Additionally the
3486 * semaphore should be released and then reacquired in any ioctl execution
3487 * path which will wait for an event to occur that is outside the scope of
3488 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3489 * a thread simply needs to acquire the write semaphore.
3490 *
3491 * Return: 0 on success, -errno on failure
3492 */
3493static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3494                               unsigned long arg)
3495{
3496        typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3497
3498        struct cxlflash_cfg *cfg = file->private_data;
3499        struct device *dev = &cfg->dev->dev;
3500        char buf[sizeof(union cxlflash_ht_ioctls)];
3501        void __user *uarg = (void __user *)arg;
3502        struct ht_cxlflash_hdr *hdr;
3503        size_t size = 0;
3504        bool known_ioctl = false;
3505        int idx = 0;
3506        int rc = 0;
3507        hioctl do_ioctl = NULL;
3508
3509        static const struct {
3510                size_t size;
3511                hioctl ioctl;
3512        } ioctl_tbl[] = {       /* NOTE: order matters here */
3513        { sizeof(struct ht_cxlflash_lun_provision),
3514                (hioctl)cxlflash_lun_provision },
3515        { sizeof(struct ht_cxlflash_afu_debug),
3516                (hioctl)cxlflash_afu_debug },
3517        };
3518
3519        /* Hold read semaphore so we can drain if needed */
3520        down_read(&cfg->ioctl_rwsem);
3521
        dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%zu\n",
                __func__, cmd, idx, sizeof(ioctl_tbl));
3524
3525        switch (cmd) {
3526        case HT_CXLFLASH_LUN_PROVISION:
3527        case HT_CXLFLASH_AFU_DEBUG:
3528                known_ioctl = true;
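                /* Commands index ioctl_tbl in order, relative to the base */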
                idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3530                size = ioctl_tbl[idx].size;
3531                do_ioctl = ioctl_tbl[idx].ioctl;
3532
3533                if (likely(do_ioctl))
3534                        break;
3535
3536                /* fall through */
3537        default:
3538                rc = -EINVAL;
3539                goto out;
3540        }
3541
3542        if (unlikely(copy_from_user(&buf, uarg, size))) {
                dev_err(dev, "%s: copy_from_user() fail "
                        "size=%zu cmd=%u (%s) uarg=%p\n",
                        __func__, size, cmd, decode_hioctl(cmd), uarg);
3546                rc = -EFAULT;
3547                goto out;
3548        }
3549
3550        hdr = (struct ht_cxlflash_hdr *)&buf;
3551        if (hdr->version != HT_CXLFLASH_VERSION_0) {
3552                dev_dbg(dev, "%s: Version %u not supported for %s\n",
3553                        __func__, hdr->version, decode_hioctl(cmd));
3554                rc = -EINVAL;
3555                goto out;
3556        }
3557
3558        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3559                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3560                rc = -EINVAL;
3561                goto out;
3562        }
3563
3564        rc = do_ioctl(cfg, (void *)&buf);
3565        if (likely(!rc))
3566                if (unlikely(copy_to_user(uarg, &buf, size))) {
                        dev_err(dev, "%s: copy_to_user() fail "
                                "size=%zu cmd=%u (%s) uarg=%p\n",
                                __func__, size, cmd, decode_hioctl(cmd), uarg);
3570                        rc = -EFAULT;
3571                }
3572
3573        /* fall through to exit */
3574
3575out:
3576        up_read(&cfg->ioctl_rwsem);
3577        if (unlikely(rc && known_ioctl))
3578                dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3579                        __func__, decode_hioctl(cmd), cmd, rc);
3580        else
3581                dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3582                        __func__, decode_hioctl(cmd), cmd, rc);
3583        return rc;
3584}
3585
3586/*
3587 * Character device file operations
3588 */
3589static const struct file_operations cxlflash_chr_fops = {
3590        .owner          = THIS_MODULE,
3591        .open           = cxlflash_chr_open,
3592        .unlocked_ioctl = cxlflash_chr_ioctl,
3593        .compat_ioctl   = cxlflash_chr_ioctl,
3594};
3595
3596/**
3597 * init_chrdev() - initialize the character device for the host
3598 * @cfg:        Internal structure associated with the host.
3599 *
3600 * Return: 0 on success, -errno on failure
3601 */
3602static int init_chrdev(struct cxlflash_cfg *cfg)
3603{
3604        struct device *dev = &cfg->dev->dev;
3605        struct device *char_dev;
3606        dev_t devno;
3607        int minor;
3608        int rc = 0;
3609
3610        minor = cxlflash_get_minor();
3611        if (unlikely(minor < 0)) {
3612                dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3613                rc = -ENOSPC;
3614                goto out;
3615        }
3616
3617        devno = MKDEV(cxlflash_major, minor);
3618        cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3619
3620        rc = cdev_add(&cfg->cdev, devno, 1);
3621        if (rc) {
3622                dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3623                goto err1;
3624        }
3625
3626        char_dev = device_create(cxlflash_class, NULL, devno,
3627                                 NULL, "cxlflash%d", minor);
3628        if (IS_ERR(char_dev)) {
3629                rc = PTR_ERR(char_dev);
3630                dev_err(dev, "%s: device_create failed rc=%d\n",
3631                        __func__, rc);
3632                goto err2;
3633        }
3634
3635        cfg->chardev = char_dev;
3636out:
3637        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3638        return rc;
3639err2:
3640        cdev_del(&cfg->cdev);
3641err1:
3642        cxlflash_put_minor(minor);
3643        goto out;
3644}
3645
3646/**
3647 * cxlflash_probe() - PCI entry point to add host
3648 * @pdev:       PCI device associated with the host.
3649 * @dev_id:     PCI device id associated with device.
3650 *
3651 * The device will initially start out in a 'probing' state and
3652 * transition to the 'normal' state at the end of a successful
3653 * probe. Should an EEH event occur during probe, the notification
3654 * thread (error_detected()) will wait until the probe handler
3655 * is nearly complete. At that time, the device will be moved to
3656 * a 'probed' state and the EEH thread woken up to drive the slot
3657 * reset and recovery (device moves to 'normal' state). Meanwhile,
3658 * the probe will be allowed to exit successfully.
3659 *
3660 * Return: 0 on success, -errno on failure
3661 */
3662static int cxlflash_probe(struct pci_dev *pdev,
3663                          const struct pci_device_id *dev_id)
3664{
3665        struct Scsi_Host *host;
3666        struct cxlflash_cfg *cfg = NULL;
3667        struct device *dev = &pdev->dev;
3668        struct dev_dependent_vals *ddv;
3669        int rc = 0;
3670        int k;
3671
3672        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3673                __func__, pdev->irq);
3674
3675        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3676        driver_template.max_sectors = ddv->max_sectors;
3677
3678        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3679        if (!host) {
3680                dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3681                rc = -ENOMEM;
3682                goto out;
3683        }
3684
3685        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3686        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3687        host->unique_id = host->host_no;
3688        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3689
3690        cfg = shost_priv(host);
3691        cfg->host = host;
3692        rc = alloc_mem(cfg);
3693        if (rc) {
3694                dev_err(dev, "%s: alloc_mem failed\n", __func__);
3695                rc = -ENOMEM;
3696                scsi_host_put(cfg->host);
3697                goto out;
3698        }
3699
3700        cfg->init_state = INIT_STATE_NONE;
3701        cfg->dev = pdev;
3702        cfg->cxl_fops = cxlflash_cxl_fops;
3703        cfg->ops = cxlflash_assign_ops(ddv);
3704        WARN_ON_ONCE(!cfg->ops);
3705
3706        /*
3707         * Promoted LUNs move to the top of the LUN table. The rest stay on
3708         * the bottom half. The bottom half grows from the end (index = 255),
3709         * whereas the top half grows from the beginning (index = 0).
3710         *
3711         * Initialize the last LUN index for all possible ports.
3712         */
3713        cfg->promote_lun_index = 0;
3714
3715        for (k = 0; k < MAX_FC_PORTS; k++)
3716                cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3717
3718        cfg->dev_id = (struct pci_device_id *)dev_id;
3719
3720        init_waitqueue_head(&cfg->tmf_waitq);
3721        init_waitqueue_head(&cfg->reset_waitq);
3722
3723        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3724        cfg->lr_state = LINK_RESET_INVALID;
3725        cfg->lr_port = -1;
3726        spin_lock_init(&cfg->tmf_slock);
3727        mutex_init(&cfg->ctx_tbl_list_mutex);
3728        mutex_init(&cfg->ctx_recovery_mutex);
3729        init_rwsem(&cfg->ioctl_rwsem);
3730        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3731        INIT_LIST_HEAD(&cfg->lluns);
3732
3733        pci_set_drvdata(pdev, cfg);
3734
3735        rc = init_pci(cfg);
3736        if (rc) {
3737                dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3738                goto out_remove;
3739        }
3740        cfg->init_state = INIT_STATE_PCI;
3741
3742        cfg->afu_cookie = cfg->ops->create_afu(pdev);
3743        if (unlikely(!cfg->afu_cookie)) {
3744                dev_err(dev, "%s: create_afu failed\n", __func__);
                rc = -ENOMEM;
                goto out_remove;
3746        }
3747
3748        rc = init_afu(cfg);
3749        if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3750                dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3751                goto out_remove;
3752        }
3753        cfg->init_state = INIT_STATE_AFU;
3754
3755        rc = init_scsi(cfg);
3756        if (rc) {
3757                dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3758                goto out_remove;
3759        }
3760        cfg->init_state = INIT_STATE_SCSI;
3761
3762        rc = init_chrdev(cfg);
3763        if (rc) {
3764                dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3765                goto out_remove;
3766        }
3767        cfg->init_state = INIT_STATE_CDEV;
3768
3769        if (wq_has_sleeper(&cfg->reset_waitq)) {
3770                cfg->state = STATE_PROBED;
3771                wake_up_all(&cfg->reset_waitq);
        } else {
                cfg->state = STATE_NORMAL;
        }
3774out:
3775        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3776        return rc;
3777
3778out_remove:
3779        cxlflash_remove(pdev);
3780        goto out;
3781}
3782
3783/**
3784 * cxlflash_pci_error_detected() - called when a PCI error is detected
3785 * @pdev:       PCI device struct.
3786 * @state:      PCI channel state.
3787 *
3788 * When an EEH occurs during an active reset, wait until the reset is
3789 * complete and then take action based upon the device state.
3790 *
3791 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3792 */
3793static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3794                                                    pci_channel_state_t state)
3795{
3796        int rc = 0;
3797        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3798        struct device *dev = &cfg->dev->dev;
3799
3800        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3801
3802        switch (state) {
3803        case pci_channel_io_frozen:
3804                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3805                                             cfg->state != STATE_PROBING);
3806                if (cfg->state == STATE_FAILTERM)
3807                        return PCI_ERS_RESULT_DISCONNECT;
3808
3809                cfg->state = STATE_RESET;
3810                scsi_block_requests(cfg->host);
3811                drain_ioctls(cfg);
3812                rc = cxlflash_mark_contexts_error(cfg);
3813                if (unlikely(rc))
3814                        dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3815                                __func__, rc);
3816                term_afu(cfg);
3817                return PCI_ERS_RESULT_NEED_RESET;
3818        case pci_channel_io_perm_failure:
3819                cfg->state = STATE_FAILTERM;
3820                wake_up_all(&cfg->reset_waitq);
3821                scsi_unblock_requests(cfg->host);
3822                return PCI_ERS_RESULT_DISCONNECT;
3823        default:
3824                break;
3825        }
3826        return PCI_ERS_RESULT_NEED_RESET;
3827}
3828
3829/**
3830 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3831 * @pdev:       PCI device struct.
3832 *
3833 * This routine is called by the pci error recovery code after the PCI
3834 * slot has been reset, just before we should resume normal operations.
3835 *
3836 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3837 */
3838static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3839{
3840        int rc = 0;
3841        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3842        struct device *dev = &cfg->dev->dev;
3843
3844        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3845
3846        rc = init_afu(cfg);
3847        if (unlikely(rc)) {
3848                dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3849                return PCI_ERS_RESULT_DISCONNECT;
3850        }
3851
3852        return PCI_ERS_RESULT_RECOVERED;
3853}
3854
3855/**
3856 * cxlflash_pci_resume() - called when normal operation can resume
3857 * @pdev:       PCI device struct
3858 */
3859static void cxlflash_pci_resume(struct pci_dev *pdev)
3860{
3861        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3862        struct device *dev = &cfg->dev->dev;
3863
3864        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3865
3866        cfg->state = STATE_NORMAL;
3867        wake_up_all(&cfg->reset_waitq);
3868        scsi_unblock_requests(cfg->host);
3869}
3870
3871/**
3872 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3873 * @dev:        Character device.
3874 * @mode:       Mode that can be used to verify access.
3875 *
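 * Nodes surface under devtmpfs as /dev/cxlflash/cxlflashN.
 *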
3876 * Return: Allocated string describing the devtmpfs structure.
3877 */
3878static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3879{
3880        return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3881}
3882
3883/**
3884 * cxlflash_class_init() - create character device class
3885 *
3886 * Return: 0 on success, -errno on failure
3887 */
3888static int cxlflash_class_init(void)
3889{
3890        dev_t devno;
3891        int rc = 0;
3892
3893        rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3894        if (unlikely(rc)) {
3895                pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3896                goto out;
3897        }
3898
3899        cxlflash_major = MAJOR(devno);
3900
3901        cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3902        if (IS_ERR(cxlflash_class)) {
3903                rc = PTR_ERR(cxlflash_class);
3904                pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3905                goto err;
3906        }
3907
3908        cxlflash_class->devnode = cxlflash_devnode;
3909out:
3910        pr_debug("%s: returning rc=%d\n", __func__, rc);
3911        return rc;
3912err:
3913        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3914        goto out;
3915}
3916
3917/**
3918 * cxlflash_class_exit() - destroy character device class
3919 */
3920static void cxlflash_class_exit(void)
3921{
3922        dev_t devno = MKDEV(cxlflash_major, 0);
3923
3924        class_destroy(cxlflash_class);
3925        unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3926}
3927
3928static const struct pci_error_handlers cxlflash_err_handler = {
3929        .error_detected = cxlflash_pci_error_detected,
3930        .slot_reset = cxlflash_pci_slot_reset,
3931        .resume = cxlflash_pci_resume,
3932};
3933
3934/*
3935 * PCI device structure
3936 */
3937static struct pci_driver cxlflash_driver = {
3938        .name = CXLFLASH_NAME,
3939        .id_table = cxlflash_pci_table,
3940        .probe = cxlflash_probe,
3941        .remove = cxlflash_remove,
3942        .shutdown = cxlflash_remove,
3943        .err_handler = &cxlflash_err_handler,
3944};
3945
3946/**
3947 * init_cxlflash() - module entry point
3948 *
3949 * Return: 0 on success, -errno on failure
3950 */
3951static int __init init_cxlflash(void)
3952{
3953        int rc;
3954
3955        check_sizes();
3956        cxlflash_list_init();
3957        rc = cxlflash_class_init();
3958        if (unlikely(rc))
3959                goto out;
3960
3961        rc = pci_register_driver(&cxlflash_driver);
3962        if (unlikely(rc))
3963                goto err;
3964out:
3965        pr_debug("%s: returning rc=%d\n", __func__, rc);
3966        return rc;
3967err:
3968        cxlflash_class_exit();
3969        goto out;
3970}
3971
3972/**
3973 * exit_cxlflash() - module exit point
3974 */
3975static void __exit exit_cxlflash(void)
3976{
3977        cxlflash_term_global_luns();
3978        cxlflash_free_errpage();
3979
3980        pci_unregister_driver(&cxlflash_driver);
3981        cxlflash_class_exit();
3982}
3983
3984module_init(init_cxlflash);
3985module_exit(exit_cxlflash);
3986