linux/drivers/scsi/cxlflash/superpipe.c
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

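/*
 * Global state shared by all adapter instances; within this file it
 * serializes creation of, and caches, the error notification page.
 */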
struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:    Source structure from which to translate/copy.
 * @resize:     Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
                                   struct dk_cxlflash_resize *resize)
{
        resize->hdr = release->hdr;
        resize->context_id = release->context_id;
        resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:     Source structure from which to translate/copy.
 * @release:    Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
{
        release->hdr = detach->hdr;
        release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
        mutex_lock(&global.mutex);
        if (global.err_page) {
                __free_page(global.err_page);
                global.err_page = NULL;
        }
        mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:        Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps out until all user contexts have been
 * removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int i, found;

        cxlflash_mark_contexts_error(cfg);

        while (true) {
                found = false;

                for (i = 0; i < MAX_CONTEXT; i++)
                        if (cfg->ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found && list_empty(&cfg->ctx_err_recovery))
                        return;

                dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
                        __func__);
                wake_up_all(&cfg->reset_waitq);
                ssleep(1);
        }
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:        Internal structure associated with the host.
 * @rctxid:     Desired context by id.
 * @file:       Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
                                           struct file *file)
{
        struct ctx_info *ctxi;

        list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
                if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
                        return ctxi;

        return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:        Internal structure associated with the host.
 * @rctxid:     Desired context (raw, un-decoded format).
 * @arg:        LUN information or file associated with request.
 * @ctx_ctrl:   Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
                             void *arg, enum ctx_ctrl ctx_ctrl)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        struct file *file = NULL;
        struct llun_info *lli = arg;
        u64 ctxid = DECODE_CTXID(rctxid);
        int rc;
        pid_t pid = current->tgid, ctxpid = 0;

        if (ctx_ctrl & CTX_CTRL_FILE) {
                lli = NULL;
                file = (struct file *)arg;
        }

        if (ctx_ctrl & CTX_CTRL_CLONE)
                pid = current->parent->tgid;

        if (likely(ctxid < MAX_CONTEXT)) {
                while (true) {
                        mutex_lock(&cfg->ctx_tbl_list_mutex);
                        ctxi = cfg->ctx_tbl[ctxid];
                        if (ctxi)
                                if ((file && (ctxi->file != file)) ||
                                    (!file && (ctxi->ctxid != rctxid)))
                                        ctxi = NULL;

                        if ((ctx_ctrl & CTX_CTRL_ERR) ||
                            (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
                                ctxi = find_error_context(cfg, rctxid, file);
                        if (!ctxi) {
                                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                                goto out;
                        }

                        /*
                         * Need to acquire ownership of the context while still
                         * under the table/list lock to serialize with a remove
                         * thread. Use the 'try' to avoid stalling the
                         * table/list lock for a single context.
                         *
                         * Note that the lock order is:
                         *
                         *      cfg->ctx_tbl_list_mutex -> ctxi->mutex
                         *
                         * Therefore release ctx_tbl_list_mutex before retrying.
                         */
                        rc = mutex_trylock(&ctxi->mutex);
                        mutex_unlock(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                break; /* got the context's lock! */
                }

                if (ctxi->unavail)
                        goto denied;

                ctxpid = ctxi->pid;
                if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
                        if (pid != ctxpid)
                                goto denied;

                if (lli) {
                        list_for_each_entry(lun_access, &ctxi->luns, list)
                                if (lun_access->lli == lli)
                                        goto out;
                        goto denied;
                }
        }

out:
        dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);

        return ctxi;

denied:
        mutex_unlock(&ctxi->mutex);
        ctxi = NULL;
        goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:       Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
        mutex_unlock(&ctxi->mutex);
}
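
/*
 * Usage sketch (illustrative only; mirrors in-file callers such as
 * _cxlflash_disk_release()): every successful get_context() must be
 * balanced by a put_context() to drop the context's mutex:
 *
 *      ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *      if (unlikely(!ctxi))
 *              return -EINVAL;
 *      ...                     // operate on the validated, locked context
 *      put_context(ctxi);
 */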

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:        Internal structure associated with the host.
 * @ctxi:       Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
        int rc = 0;
        u64 val;

        /* Unlock cap and restrict user to read/write cmds in translated mode */
        readq_be(&ctrl_map->mbox_r);
        val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
                dev_err(dev, "%s: ctx may be closed val=%016llX\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
        }

        /* Set up MMIO registers pointing to the RHT */
        writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
        val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
        writeq_be(val, &ctrl_map->rht_cnt_id);
out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:       SCSI device associated with LUN.
 * @lli:        LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        u8 *cmd_buf = NULL;
        u8 *scsi_cmd = NULL;
        u8 *sense_buf = NULL;
        int rc = 0;
        int result = 0;
        int retry_cnt = 0;
        u32 to = CMD_TIMEOUT * HZ;

retry:
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
        scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
        sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
        if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
                rc = -ENOMEM;
                goto out;
        }

        scsi_cmd[0] = SERVICE_ACTION_IN_16;     /* read cap(16) */
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

        dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);

        /* Drop the ioctl read semaphore across lengthy call */
        up_read(&cfg->ioctl_rwsem);
        result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
                              CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
        down_read(&cfg->ioctl_rwsem);
        rc = check_state(cfg);
        if (rc) {
                dev_err(dev, "%s: Failed state! result=0x%08X\n",
                        __func__, result);
                rc = -ENODEV;
                goto out;
        }

        if (driver_byte(result) == DRIVER_SENSE) {
                result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
                if (result & SAM_STAT_CHECK_CONDITION) {
                        struct scsi_sense_hdr sshdr;

                        scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
                                            &sshdr);
                        switch (sshdr.sense_key) {
                        case NO_SENSE:
                        case RECOVERED_ERROR:
                                /* fall through */
                        case NOT_READY:
                                result &= ~SAM_STAT_CHECK_CONDITION;
                                break;
                        case UNIT_ATTENTION:
                                switch (sshdr.asc) {
                                case 0x29: /* Power on Reset or Device Reset */
                                        /* fall through */
                                case 0x2A: /* Device capacity changed */
                                case 0x3F: /* Report LUNs changed */
                                        /* Retry the command once more */
                                        if (retry_cnt++ < 1) {
                                                kfree(cmd_buf);
                                                kfree(scsi_cmd);
                                                kfree(sense_buf);
                                                goto retry;
                                        }
                                }
                                break;
                        default:
                                break;
                        }
                }
        }

        if (result) {
                dev_err(dev, "%s: command failed, result=0x%x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
        }

        /*
         * Read cap was successful, grab values from the buffer;
         * note that we don't need to worry about unaligned access
         * as the buffer is allocated on an aligned boundary.
         */
        mutex_lock(&gli->mutex);
        gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
        gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
        mutex_unlock(&gli->mutex);

out:
        kfree(cmd_buf);
        kfree(scsi_cmd);
        kfree(sense_buf);

        dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
                __func__, gli->max_lba, gli->blk_len, rc);
        return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:       Context owning the resource handle.
 * @rhndl:      Resource handle associated with entry.
 * @lli:        LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;

        if (unlikely(!ctxi->rht_start)) {
                pr_debug("%s: Context does not have allocated RHT!\n",
                         __func__);
                goto out;
        }

        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
                pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
                goto out;
        }

        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
                pr_debug("%s: Bad resource handle LUN! (%d)\n",
                         __func__, rhndl);
                goto out;
        }

        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
                pr_debug("%s: Unopened resource handle! (%d)\n",
                         __func__, rhndl);
                rhte = NULL;
                goto out;
        }

out:
        return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:       Context owning the resource handle.
 * @lli:        LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
{
        struct sisl_rht_entry *rhte = NULL;
        int i;

        /* Find a free RHT entry */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi->rht_start[i].nmask == 0) {
                        rhte = &ctxi->rht_start[i];
                        ctxi->rht_out++;
                        break;
                }

        if (likely(rhte))
                ctxi->rht_lun[i] = lli;

        pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
        return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:       Context owning the resource handle.
 * @rhte:       RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
                  struct sisl_rht_entry *rhte)
{
        u32 rsrc_handle = rhte - ctxi->rht_start;

        rhte->nmask = 0;
        rhte->fp = 0;
        ctxi->rht_out--;
        ctxi->rht_lun[rsrc_handle] = NULL;
        ctxi->rht_needs_ws[rsrc_handle] = false;
}
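
/*
 * Usage sketch (illustrative only): RHT entries are claimed when a
 * resource is opened against a LUN and returned on release, as done in
 * _cxlflash_disk_release() below. The error value is an example only:
 *
 *      rhte = rhte_checkout(ctxi, lli);        // claim a free entry
 *      if (unlikely(!rhte))
 *              return -EMFILE;                 // table full (example errno)
 *      ...                                     // program entry (e.g. rht_format1())
 *      rhte_checkin(ctxi, rhte);               // return entry on teardown
 */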

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:       RHTE to populate.
 * @lun_id:     LUN ID of LUN associated with RHTE.
 * @perm:       Desired permissions for RHTE.
 * @port_sel:   Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
                        u32 port_sel)
{
        /*
         * Populate the Format 1 RHT entry for direct access (physical
         * LUN) using the synchronization sequence defined in the
         * SISLite specification.
         */
        struct sisl_rht_entry_f1 dummy = { 0 };
        struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

        memset(rhte_f1, 0, sizeof(*rhte_f1));
        rhte_f1->fp = SISL_RHT_FP(1U, 0);
        dma_wmb(); /* Make setting of format bit visible */

        rhte_f1->lun_id = lun_id;
        dma_wmb(); /* Make setting of LUN id visible */

        /*
         * Use a dummy RHT Format 1 entry to build the second dword
         * of the entry that must be populated in a single write when
         * enabled (valid bit set to TRUE).
         */
        dummy.valid = 0x80;
        dummy.fp = SISL_RHT_FP(1U, perm);
        dummy.port_sel = port_sel;
        rhte_f1->dw = dummy.dw;

        dma_wmb(); /* Make remaining RHT entry fields visible */
}

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:        LUN to attach.
 * @mode:       Desired mode of the LUN.
 * @locked:     Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
        int rc = 0;

        if (!locked)
                mutex_lock(&gli->mutex);

        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
                pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
        }

        gli->users++;
        WARN_ON(gli->users <= 0);
out:
        pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
                 __func__, rc, gli->mode, gli->users);
        if (!locked)
                mutex_unlock(&gli->mutex);
        return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:        LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
        mutex_lock(&gli->mutex);
        WARN_ON(gli->mode == MODE_NONE);
        if (--gli->users == 0) {
                gli->mode = MODE_NONE;
                cxlflash_ba_terminate(&gli->blka.ba_lun);
        }
        pr_debug("%s: gli->users=%u\n", __func__, gli->users);
        WARN_ON(gli->users < 0);
        mutex_unlock(&gli->mutex);
}
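
/*
 * Usage sketch (illustrative only): attach/detach calls must balance,
 * and all users of a LUN must agree on its mode. The first attach sets
 * the mode; a conflicting mode is rejected until the last user detaches
 * and the mode resets to MODE_NONE:
 *
 *      rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);    // 0, sets mode
 *      rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, false);     // -EINVAL
 *      cxlflash_lun_detach(gli);       // users -> 0, mode = MODE_NONE
 */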

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:       SCSI device associated with LUN.
 * @ctxi:       Context owning resources.
 * @release:    Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
        bool put_ctx = false;

        struct dk_cxlflash_resize size;
        res_hndl_t rhndl = release->rsrc_handle;

        int rc = 0;
        u64 ctxid = DECODE_CTXID(release->context_id),
            rctxid = release->context_id;

        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Resize to 0 for virtual LUNs by setting the size
         * to 0. This will clear LXT_START and LXT_CNT fields
         * in the RHT entry and properly sync with the AFU.
         *
         * Afterwards we clear the remaining fields.
         */
        switch (gli->mode) {
        case MODE_VIRTUAL:
                marshal_rele_to_resize(release, &size);
                size.req_size = 0;
                rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
                if (rc) {
                        dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
                        goto out;
                }

                break;
        case MODE_PHYSICAL:
                /*
                 * Clear the Format 1 RHT entry for direct access
                 * (physical LUN) using the synchronization sequence
                 * defined in the SISLite specification.
                 */
                rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

                rhte_f1->valid = 0;
                dma_wmb(); /* Make revocation of RHT entry visible */

                rhte_f1->lun_id = 0;
                dma_wmb(); /* Make clearing of LUN id visible */

                rhte_f1->dw = 0;
                dma_wmb(); /* Make RHT entry bottom-half clearing visible */

                if (!ctxi->err_recovery_active)
                        cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
                goto out;
        }

        rhte_checkin(ctxi, rhte);
        cxlflash_lun_detach(gli);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

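/**
 * cxlflash_disk_release() - releases the specified resource entry
 * @sdev:       SCSI device associated with LUN.
 * @release:    Release ioctl data structure.
 *
 * Wrapper for _cxlflash_disk_release() that performs the context lookup
 * on its own.
 *
 * Return: 0 on success, -errno on failure
 */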
int cxlflash_disk_release(struct scsi_device *sdev,
                          struct dk_cxlflash_release *release)
{
        return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:        Internal structure associated with the host.
 * @ctxi:       Context to release.
 *
 * This routine is safe to be called with a non-initialized context
 * and is tolerant of being called with the context's mutex held (it
 * will be unlocked if necessary before freeing). Also note that the
 * routine conditionally checks for the existence of the context control
 * map before clearing the RHT registers and context capabilities because
 * it is possible to destroy a context while the context is in the error
 * state (previous mapping was removed [so there is no need to worry about
 * clearing] and context is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
{
        struct afu *afu = cfg->afu;

        if (ctxi->initialized) {
                WARN_ON(!list_empty(&ctxi->luns));

                /* Clear RHT registers and drop all capabilities for context */
                if (afu->afu_map && ctxi->ctrl_map) {
                        writeq_be(0, &ctxi->ctrl_map->rht_start);
                        writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                        writeq_be(0, &ctxi->ctrl_map->ctx_cap);
                }

                if (mutex_is_locked(&ctxi->mutex))
                        mutex_unlock(&ctxi->mutex);
        }

        /* Free memory associated with context */
        free_page((ulong)ctxi->rht_start);
        kfree(ctxi->rht_needs_ws);
        kfree(ctxi->rht_lun);
        kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:        Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct llun_info **lli = NULL;
        u8 *ws = NULL;
        struct sisl_rht_entry *rhte;

        ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
                dev_err(dev, "%s: Unable to allocate context!\n", __func__);
                goto err;
        }

        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
                dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
                goto err;
        }

        ctxi->rht_lun = lli;
        ctxi->rht_needs_ws = ws;
        ctxi->rht_start = rhte;
out:
        return ctxi;

err:
        kfree(ws);
        kfree(lli);
        kfree(ctxi);
        ctxi = NULL;
        goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:       Previously allocated context
 * @cfg:        Internal structure associated with the host.
 * @ctx:        Previously obtained CXL context reference.
 * @ctxid:      Previously obtained process element associated with CXL context.
 * @adap_fd:    Previously obtained adapter fd associated with CXL context.
 * @file:       Previously obtained file associated with CXL context.
 * @perms:      User-specified permissions.
 *
 * Upon return, the context is marked as initialized and the context's mutex
 * is locked.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
                         struct cxl_context *ctx, int ctxid, int adap_fd,
                         struct file *file, u32 perms)
{
        struct afu *afu = cfg->afu;

        ctxi->rht_perms = perms;
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->lfd = adap_fd;
        ctxi->pid = current->tgid; /* tgid = pid */
        ctxi->ctx = ctx;
        ctxi->file = file;
        ctxi->initialized = true;
        mutex_init(&ctxi->mutex);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */

        mutex_lock(&ctxi->mutex);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:       SCSI device associated with LUN.
 * @ctxi:       Context owning resources.
 * @detach:     Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
{
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
        struct dk_cxlflash_release rel;
        bool put_ctx = false;

        int i;
        int rc = 0;
        int lfd;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;

        dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context! (%llu)\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        /* Cleanup outstanding resources tied to this LUN */
        if (ctxi->rht_out) {
                marshal_det_to_rele(detach, &rel);
                for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
                        if (ctxi->rht_lun[i] == lli) {
                                rel.rsrc_handle = i;
                                _cxlflash_disk_release(sdev, ctxi, &rel);
                        }

                        /* No need to loop further if we're done */
                        if (ctxi->rht_out == 0)
                                break;
                }
        }

        /* Take our LUN out of context, free the node */
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                if (lun_access->lli == lli) {
                        list_del(&lun_access->list);
                        kfree(lun_access);
                        lun_access = NULL;
                        break;
                }

        /* Tear down context following last LUN cleanup */
        if (list_empty(&ctxi->luns)) {
                ctxi->unavail = true;
                mutex_unlock(&ctxi->mutex);
                mutex_lock(&cfg->ctx_tbl_list_mutex);
                mutex_lock(&ctxi->mutex);

                /* Might not have been in error list so conditionally remove */
                if (!list_empty(&ctxi->list))
                        list_del(&ctxi->list);
                cfg->ctx_tbl[ctxid] = NULL;
                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                mutex_unlock(&ctxi->mutex);

                lfd = ctxi->lfd;
                destroy_context(cfg, ctxi);
                ctxi = NULL;
                put_ctx = false;

                /*
                 * As a last step, clean up external resources when not
                 * already on an external cleanup thread, i.e.: close(adap_fd).
                 *
                 * NOTE: this will free up the context from the CXL services,
                 * allowing it to dole out the same context_id on a future
                 * (or even currently in-flight) disk_attach operation.
                 */
                if (lfd != -1)
                        sys_close(lfd);
        }

        /* Release the sdev reference that bound this LUN to the context */
        scsi_device_put(sdev);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

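/**
 * cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:       SCSI device associated with LUN.
 * @detach:     Detach ioctl data structure.
 *
 * Wrapper for _cxlflash_disk_detach() that performs the context lookup
 * on its own.
 *
 * Return: 0 on success, -errno on failure
 */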
static int cxlflash_disk_detach(struct scsi_device *sdev,
                                struct dk_cxlflash_detach *detach)
{
        return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:      File-system inode associated with fd.
 * @file:       File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close is performed on the adapter file descriptor returned
 * to the user. Programmatically, the user is not required to perform
 * the close, as it is handled internally via the detach ioctl when
 * a context is being removed. Note that nothing prevents the user
 * from performing a close, but the user should be aware that doing
 * so is considered catastrophic and subsequent usage of the superpipe
 * API with previously saved off tokens will fail.
 *
 * When initiated from an external close (either by the user or via
 * a process tear down), the routine derives the context reference
 * and calls detach for each LUN associated with the context. The
 * final detach operation will cause the context itself to be freed.
 * Note that the saved off lfd is reset prior to calling detach to
 * signify that the final detach should not perform a close.
 *
 * When initiated from a detach operation as part of the tear down
 * of a context, the context is first completely freed and then the
 * close is performed. This routine will fail to derive the context
 * reference (due to the context having already been freed) and then
 * call into the CXL release entry point.
 *
 * Thus, with exception to when the CXL process element (context id)
 * lookup fails (a case that should theoretically never occur), every
 * call into this routine results in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct dk_cxlflash_detach detach = { { 0 }, 0 };
        struct lun_access *lun_access, *t;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Context %d already free!\n",
                                __func__, ctxid);
                        goto out_release;
                }

                dev_dbg(dev, "%s: Another process owns context %d!\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }

        dev_dbg(dev, "%s: close(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        /* Reset the file descriptor to indicate we're on a close() thread */
        ctxi->lfd = -1;
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
        cxl_fd_release(inode, file);
out:
        dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:       Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
        unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(void)
{
        struct page *err_page = global.err_page;

        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
                        pr_err("%s: Unable to allocate err_page!\n", __func__);
                        goto out;
                }

                memset(page_address(err_page), -1, PAGE_SIZE);

                /* Serialize update w/ other threads to avoid a leak */
                mutex_lock(&global.mutex);
                if (likely(!global.err_page))
                        global.err_page = err_page;
                else {
                        __free_page(err_page);
                        err_page = global.err_page;
                }
                mutex_unlock(&global.mutex);
        }

out:
        pr_debug("%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:        VM area associated with mapping.
 * @vmf:        VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct page *err_page = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int rc = 0;
        int ctxid;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                goto err;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                goto err;
        }

        dev_dbg(dev, "%s: fault(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
        } else {
                dev_dbg(dev, "%s: err recovery active, use err_page!\n",
                        __func__);

                err_page = get_err_page();
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Could not obtain error page!\n",
                                __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }

                get_page(err_page);
                vmf->page = err_page;
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;

err:
        rc = VM_FAULT_SIGBUS;
        goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
        .fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:       File installed with adapter file descriptor.
 * @vma:        VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cxl_context *ctx = cxl_fops_get_context(file);
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;
        int rc = 0;

        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed! (%d)\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }

        dev_dbg(dev, "%s: mmap(%d) for context %d\n",
                __func__, ctxi->lfd, ctxid);

        rc = cxl_fd_mmap(file, vma);
        if (likely(!rc)) {
                /* Insert ourself in the mmap fault handler path */
                ctxi->cxl_mmap_vmops = vma->vm_ops;
                vma->vm_ops = &cxlflash_mmap_vmops;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        return rc;
}

const struct file_operations cxlflash_cxl_fops = {
        .owner = THIS_MODULE,
        .mmap = cxlflash_cxl_mmap,
        .release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:        Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
        int i, rc = 0;
        struct ctx_info *ctxi = NULL;

        mutex_lock(&cfg->ctx_tbl_list_mutex);

        for (i = 0; i < MAX_CONTEXT; i++) {
                ctxi = cfg->ctx_tbl[i];
                if (ctxi) {
                        mutex_lock(&ctxi->mutex);
                        cfg->ctx_tbl[i] = NULL;
                        list_add(&ctxi->list, &cfg->ctx_err_recovery);
                        ctxi->err_recovery_active = true;
                        ctxi->ctrl_map = NULL;
                        unmap_context(ctxi);
                        mutex_unlock(&ctxi->mutex);
                }
        }

        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
        .owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:        Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

retry:
        switch (cfg->state) {
        case STATE_RESET:
                dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
                up_read(&cfg->ioctl_rwsem);
                rc = wait_event_interruptible(cfg->reset_waitq,
                                              cfg->state != STATE_RESET);
                down_read(&cfg->ioctl_rwsem);
                if (unlikely(rc))
                        break;
                goto retry;
        case STATE_FAILTERM:
                dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
                rc = -ENODEV;
                break;
        default:
                break;
        }

        return rc;
}
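
/*
 * Usage sketch (illustrative only; modeled on read_cap16() above): an
 * ioctl thread drops the ioctl read semaphore around a lengthy call so
 * that EEH recovery can drain ioctls, then revalidates the state:
 *
 *      up_read(&cfg->ioctl_rwsem);
 *      result = scsi_execute(...);     // potentially lengthy operation
 *      down_read(&cfg->ioctl_rwsem);
 *      rc = check_state(cfg);          // waits out reset or fails
 */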
1290
1291/**
1292 * cxlflash_disk_attach() - attach a LUN to a context
1293 * @sdev:       SCSI device associated with LUN.
1294 * @attach:     Attach ioctl data structure.
1295 *
1296 * Creates a context and attaches LUN to it. A LUN can only be attached
1297 * one time to a context (subsequent attaches for the same context/LUN pair
1298 * are not supported). Additional LUNs can be attached to a context by
1299 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
1300 *
1301 * Return: 0 on success, -errno on failure
1302 */
1303static int cxlflash_disk_attach(struct scsi_device *sdev,
1304                                struct dk_cxlflash_attach *attach)
1305{
1306        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1307        struct device *dev = &cfg->dev->dev;
1308        struct afu *afu = cfg->afu;
1309        struct llun_info *lli = sdev->hostdata;
1310        struct glun_info *gli = lli->parent;
1311        struct cxl_ioctl_start_work *work;
1312        struct ctx_info *ctxi = NULL;
1313        struct lun_access *lun_access = NULL;
1314        int rc = 0;
1315        u32 perms;
1316        int ctxid = -1;
1317        u64 rctxid = 0UL;
1318        struct file *file = NULL;
1319
1320        struct cxl_context *ctx = NULL;
1321
1322        int fd = -1;
1323
1324        if (attach->num_interrupts > 4) {
1325                dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1326                        __func__, attach->num_interrupts);
1327                rc = -EINVAL;
1328                goto out;
1329        }
1330
1331        if (gli->max_lba == 0) {
1332                dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
1333                        __func__, lli->lun_id[sdev->channel]);
1334                rc = read_cap16(sdev, lli);
1335                if (rc) {
1336                        dev_err(dev, "%s: Invalid device! (%d)\n",
1337                                __func__, rc);
1338                        rc = -ENODEV;
1339                        goto out;
1340                }
1341                dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
1342                dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
1343        }
1344
1345        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
1346                rctxid = attach->context_id;
1347                ctxi = get_context(cfg, rctxid, NULL, 0);
1348                if (!ctxi) {
1349                        dev_dbg(dev, "%s: Bad context! (%016llX)\n",
1350                                __func__, rctxid);
1351                        rc = -EINVAL;
1352                        goto out;
1353                }
1354
1355                list_for_each_entry(lun_access, &ctxi->luns, list)
1356                        if (lun_access->lli == lli) {
1357                                dev_dbg(dev, "%s: Already attached!\n",
1358                                        __func__);
1359                                rc = -EINVAL;
1360                                goto out;
1361                        }
1362        }
1363
1364        rc = scsi_device_get(sdev);
1365        if (unlikely(rc)) {
1366                dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
1367                goto out;
1368        }
1369
1370        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
1371        if (unlikely(!lun_access)) {
1372                dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
1373                rc = -ENOMEM;
1374                goto err;
1375        }
1376
1377        lun_access->lli = lli;
1378        lun_access->sdev = sdev;
1379
1380        /* Non-NULL context indicates reuse */
1381        if (ctxi) {
1382                dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
1383                        __func__, rctxid);
1384                list_add(&lun_access->list, &ctxi->luns);
1385                fd = ctxi->lfd;
1386                goto out_attach;
1387        }
1388
1389        ctxi = create_context(cfg);
1390        if (unlikely(!ctxi)) {
1391                dev_err(dev, "%s: Failed to create context! (%d)\n",
1392                        __func__, ctxid);
1393                goto err;
1394        }
1395
1396        ctx = cxl_dev_context_init(cfg->dev);
1397        if (IS_ERR_OR_NULL(ctx)) {
1398                dev_err(dev, "%s: Could not initialize context %p\n",
1399                        __func__, ctx);
1400                rc = -ENODEV;
1401                goto err;
1402        }
1403
1404        work = &ctxi->work;
1405        work->num_interrupts = attach->num_interrupts;
1406        work->flags = CXL_START_WORK_NUM_IRQS;
1407
1408        rc = cxl_start_work(ctx, work);
1409        if (unlikely(rc)) {
1410                dev_dbg(dev, "%s: Could not start context rc=%d\n",
1411                        __func__, rc);
1412                goto err;
1413        }
1414
1415        ctxid = cxl_process_element(ctx);
1416        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1417                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1418                rc = -EPERM;
1419                goto err;
1420        }
1421
1422        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1423        if (unlikely(fd < 0)) {
1424                rc = -ENODEV;
1425                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1426                goto err;
1427        }
1428
1429        /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
1430        perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1431
1432        /* Context mutex is locked upon return */
1433        init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
1434
1435        rc = afu_attach(cfg, ctxi);
1436        if (unlikely(rc)) {
1437                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1438                goto err;
1439        }
1440
1441        /*
1442         * No error paths after this point. Once the fd is installed it's
1443         * visible to user space and can't be undone safely on this thread.
1444         * There is no need to worry about a deadlock here because no one
1445         * knows about us yet; we can be the only one holding our mutex.
1446         */
1447        list_add(&lun_access->list, &ctxi->luns);
1448        mutex_unlock(&ctxi->mutex);
1449        mutex_lock(&cfg->ctx_tbl_list_mutex);
1450        mutex_lock(&ctxi->mutex);
1451        cfg->ctx_tbl[ctxid] = ctxi;
1452        mutex_unlock(&cfg->ctx_tbl_list_mutex);
1453        fd_install(fd, file);
1454
1455out_attach:
1456        attach->hdr.return_flags = 0;
1457        attach->context_id = ctxi->ctxid;
1458        attach->block_size = gli->blk_len;
1459        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1460        attach->last_lba = gli->max_lba;
1461        attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
1462        attach->max_xfer /= gli->blk_len;
1463
1464out:
1465        attach->adap_fd = fd;
1466
1467        if (ctxi)
1468                put_context(ctxi);
1469
1470        dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
1471                __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
1472        return rc;
1473
1474err:
1475        /* Cleanup CXL context; okay to 'stop' even if it was not started */
1476        if (!IS_ERR_OR_NULL(ctx)) {
1477                cxl_stop_context(ctx);
1478                cxl_release_context(ctx);
1479                ctx = NULL;
1480        }
1481
1482        /*
1483         * Here, we're overriding the fops with a dummy all-NULL fops because
1484         * fput() calls the release fop, which will cause us to mistakenly
1485         * call into the CXL code. Rather than try to add yet more complexity
1486         * to that routine (cxlflash_cxl_release) we should try to fix the
1487         * issue here.
1488         */
1489        if (fd > 0) {
1490                file->f_op = &null_fops;
1491                fput(file);
1492                put_unused_fd(fd);
1493                fd = -1;
1494                file = NULL;
1495        }
1496
1497        /* Cleanup our context; safe to call even with mutex locked */
1498        if (ctxi) {
1499                destroy_context(cfg, ctxi);
1500                ctxi = NULL;
1501        }
1502
1503        kfree(lun_access);
1504        scsi_device_put(sdev);
1505        goto out;
1506}
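
/*
 * Illustrative user-space sketch (not part of this driver) of driving
 * DK_CXLFLASH_ATTACH through the flow above. Field names come from
 * uapi/scsi/cxlflash_ioctl.h; the device path and interrupt count are
 * assumptions. Note that hdr.flags carries the O_* access mode that is
 * translated to AFU permission bits via SISL_RHT_PERM() above.
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <scsi/cxlflash_ioctl.h>
 *
 *      struct dk_cxlflash_attach attach;
 *      int fd = open("/dev/sdX", O_RDWR);      // hypothetical LUN node
 *
 *      memset(&attach, 0, sizeof(attach));     // rsvd/return_flags must be 0
 *      attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *      attach.hdr.flags = O_RDWR;              // read/write permissions
 *      attach.num_interrupts = 4;              // assumed interrupt count
 *
 *      if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0) {
 *              // attach.context_id names this context on later ioctls;
 *              // attach.adap_fd is the new adapter file descriptor
 *      }
 */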
1507
1508/**
1509 * recover_context() - recovers a context in error
1510 * @cfg:        Internal structure associated with the host.
1511 * @ctxi:       Context to recover.
1512 *
1513 * Re-establishes the state for a context-in-error.
1514 *
1515 * Return: 0 on success, -errno on failure
1516 */
1517static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
1518{
1519        struct device *dev = &cfg->dev->dev;
1520        int rc = 0;
1521        int old_fd, fd = -1;
1522        int ctxid = -1;
1523        struct file *file;
1524        struct cxl_context *ctx;
1525        struct afu *afu = cfg->afu;
1526
1527        ctx = cxl_dev_context_init(cfg->dev);
1528        if (IS_ERR_OR_NULL(ctx)) {
1529                dev_err(dev, "%s: Could not initialize context %p\n",
1530                        __func__, ctx);
1531                rc = -ENODEV;
1532                goto out;
1533        }
1534
1535        rc = cxl_start_work(ctx, &ctxi->work);
1536        if (unlikely(rc)) {
1537                dev_dbg(dev, "%s: Could not start context rc=%d\n",
1538                        __func__, rc);
1539                goto err1;
1540        }
1541
1542        ctxid = cxl_process_element(ctx);
1543        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1544                dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1545                rc = -EPERM;
1546                goto err2;
1547        }
1548
1549        file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1550        if (unlikely(fd < 0)) {
1551                rc = -ENODEV;
1552                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1553                goto err2;
1554        }
1555
1556        /* Update with new MMIO area based on updated context id */
1557        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
1558
1559        rc = afu_attach(cfg, ctxi);
1560        if (rc) {
1561                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1562                goto err3;
1563        }
1564
1565        /*
1566         * No error paths after this point. Once the fd is installed it's
1567         * visible to user space and can't be undone safely on this thread.
1568         */
1569        old_fd = ctxi->lfd;
1570        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1571        ctxi->lfd = fd;
1572        ctxi->ctx = ctx;
1573        ctxi->file = file;
1574
1575        /*
1576         * Put context back in table (note the reinit of the context list);
1577         * we must first drop the context's mutex and then acquire it in
1578         * order with the table/list mutex to avoid a deadlock - safe to do
1579         * here because no one can find us at this moment in time.
1580         */
1581        mutex_unlock(&ctxi->mutex);
1582        mutex_lock(&cfg->ctx_tbl_list_mutex);
1583        mutex_lock(&ctxi->mutex);
1584        list_del_init(&ctxi->list);
1585        cfg->ctx_tbl[ctxid] = ctxi;
1586        mutex_unlock(&cfg->ctx_tbl_list_mutex);
1587        fd_install(fd, file);
1588
1589        /* Release the original adapter fd and associated CXL resources */
1590        sys_close(old_fd);
1591out:
1592        dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1593                __func__, ctxid, fd, rc);
1594        return rc;
1595
1596err3:
1597        fput(file);
1598        put_unused_fd(fd);
1599err2:
1600        cxl_stop_context(ctx);
1601err1:
1602        cxl_release_context(ctx);
1603        goto out;
1604}
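
/*
 * The unlock/relock dance above encodes the lock ordering rule used
 * throughout this file: cfg->ctx_tbl_list_mutex is the outer lock and
 * must be taken before a context's mutex. A minimal sketch of the
 * discipline for any path that starts out holding only ctxi->mutex:
 *
 *      mutex_unlock(&ctxi->mutex);             // drop out-of-order lock
 *      mutex_lock(&cfg->ctx_tbl_list_mutex);   // outer lock first
 *      mutex_lock(&ctxi->mutex);               // then the context lock
 *      ...update cfg->ctx_tbl / context lists...
 *      mutex_unlock(&cfg->ctx_tbl_list_mutex);
 */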
1605
1606/**
1607 * cxlflash_afu_recover() - initiates AFU recovery
1608 * @sdev:       SCSI device associated with LUN.
1609 * @recover:    Recover ioctl data structure.
1610 *
1611 * Only a single recovery is allowed at a time to avoid exhausting CXL
1612 * resources (leading to recovery failure) in the event that we're up
1613 * against the maximum context limit. For similar reasons,
1614 * a context recovery is retried if there are multiple recoveries taking
1615 * place at the same time and the failure was due to CXL services being
1616 * unable to keep up.
1617 *
1618 * As this routine is called on ioctl context, it holds the ioctl r/w
1619 * semaphore that is used to drain ioctls in recovery scenarios. The
1620 * implementation to achieve the pacing described above (a local mutex)
1621 * requires that the ioctl r/w semaphore be dropped and reacquired to
1622 * avoid a 3-way deadlock when multiple process recoveries operate in
1623 * parallel.
1624 *
1625 * Because a user can detect an error condition before the kernel, it is
1626 * quite possible for this routine to act as the kernel's EEH detection
1627 * source (MMIO read of mbox_r). Because of this, there is a window of
1628 * time where an EEH might have been detected but not yet 'serviced'
1629 * (callback invoked, causing the device to enter reset state). To avoid
1630 * looping in this routine during that window, a 1 second sleep is in place
1631 * between the time the MMIO failure is detected and the time a wait on the
1632 * reset wait queue is attempted via check_state().
1633 *
1634 * Return: 0 on success, -errno on failure
1635 */
1636static int cxlflash_afu_recover(struct scsi_device *sdev,
1637                                struct dk_cxlflash_recover_afu *recover)
1638{
1639        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1640        struct device *dev = &cfg->dev->dev;
1641        struct llun_info *lli = sdev->hostdata;
1642        struct afu *afu = cfg->afu;
1643        struct ctx_info *ctxi = NULL;
1644        struct mutex *mutex = &cfg->ctx_recovery_mutex;
1645        u64 ctxid = DECODE_CTXID(recover->context_id),
1646            rctxid = recover->context_id;
1647        long reg;
1648        int lretry = 20; /* up to 2 seconds */
1649        int rc = 0;
1650
1651        atomic_inc(&cfg->recovery_threads);
1652        up_read(&cfg->ioctl_rwsem);
1653        rc = mutex_lock_interruptible(mutex);
1654        down_read(&cfg->ioctl_rwsem);
1655        if (rc)
1656                goto out_nolock;
1657        rc = check_state(cfg);
1658        if (rc) {
1659                dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
1660                rc = -ENODEV;
1661                goto out;
1662        }
1663
1664        dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
1665                __func__, recover->reason, rctxid);
1666
1667retry:
1668        /* Ensure that this process is attached to the context */
1669        ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1670        if (unlikely(!ctxi)) {
1671                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1672                rc = -EINVAL;
1673                goto out;
1674        }
1675
1676        if (ctxi->err_recovery_active) {
1677retry_recover:
1678                rc = recover_context(cfg, ctxi);
1679                if (unlikely(rc)) {
1680                        dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
1681                                __func__, ctxid, rc);
1682                        if ((rc == -ENODEV) &&
1683                            ((atomic_read(&cfg->recovery_threads) > 1) ||
1684                             (lretry--))) {
1685                                dev_dbg(dev, "%s: Going to try again!\n",
1686                                        __func__);
1687                                mutex_unlock(mutex);
1688                                msleep(100);
1689                                rc = mutex_lock_interruptible(mutex);
1690                                if (rc) {
                                        put_context(ctxi);
1691                                        goto out_nolock;
                                }
1692                                goto retry_recover;
1693                        }
1694
1695                        goto out;
1696                }
1697
1698                ctxi->err_recovery_active = false;
1699                recover->context_id = ctxi->ctxid;
1700                recover->adap_fd = ctxi->lfd;
1701                recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1702                recover->hdr.return_flags |=
1703                        DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1704                goto out;
1705        }
1706
1707        /* Test if in error state */
1708        reg = readq_be(&afu->ctrl_map->mbox_r);
1709        if (reg == -1) {
1710                dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
1711
1712                /*
1713                 * Before checking the state, put back the context obtained with
1714                 * get_context() as it is no longer needed and sleep for a short
1715                 * period of time (see prolog notes).
1716                 */
1717                put_context(ctxi);
1718                ctxi = NULL;
1719                ssleep(1);
1720                rc = check_state(cfg);
1721                if (unlikely(rc))
1722                        goto out;
1723                goto retry;
1724        }
1725
1726        dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
1727out:
1728        if (likely(ctxi))
1729                put_context(ctxi);
1730        mutex_unlock(mutex);
out_nolock:
1731        atomic_dec_if_positive(&cfg->recovery_threads);
1732        return rc;
1733}
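
/*
 * Illustrative user-space sketch (not part of this driver) of the recovery
 * ioctl described in the prolog above; field names come from
 * uapi/scsi/cxlflash_ioctl.h and the reason value is an assumption.
 *
 *      struct dk_cxlflash_recover_afu recover;
 *
 *      memset(&recover, 0, sizeof(recover));
 *      recover.hdr.version = DK_CXLFLASH_VERSION_0;
 *      recover.context_id = ctx_id;    // handed back by DK_CXLFLASH_ATTACH
 *      recover.reason = 0;             // assumed: no specific reason code
 *
 *      if (ioctl(fd, DK_CXLFLASH_RECOVER_AFU, &recover) == 0 &&
 *          (recover.hdr.return_flags &
 *           DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET)) {
 *              // the context was re-created: switch to recover.adap_fd
 *              // and re-establish any MMIO mapping before continuing
 *      }
 */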
1734
1735/**
1736 * process_sense() - evaluates and processes sense data
1737 * @sdev:       SCSI device associated with LUN.
1738 * @verify:     Verify ioctl data structure.
1739 *
1740 * Return: 0 on success, -errno on failure
1741 */
1742static int process_sense(struct scsi_device *sdev,
1743                         struct dk_cxlflash_verify *verify)
1744{
1745        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1746        struct device *dev = &cfg->dev->dev;
1747        struct llun_info *lli = sdev->hostdata;
1748        struct glun_info *gli = lli->parent;
1749        u64 prev_lba = gli->max_lba;
1750        struct scsi_sense_hdr sshdr = { 0 };
1751        int rc = 0;
1752
1753        rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1754                                  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1755        if (!rc) {
1756                dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
1757                rc = -EINVAL;
1758                goto out;
1759        }
1760
1761        switch (sshdr.sense_key) {
1762        case NO_SENSE:
1763        case RECOVERED_ERROR:
1765        case NOT_READY:
1766                break;
1767        case UNIT_ATTENTION:
1768                switch (sshdr.asc) {
1769                case 0x29: /* Power on Reset or Device Reset */
1771                case 0x2A: /* Device settings/capacity changed */
1772                        rc = read_cap16(sdev, lli);
1773                        if (rc) {
1774                                rc = -ENODEV;
1775                                break;
1776                        }
1777                        if (prev_lba != gli->max_lba)
1778                                dev_dbg(dev, "%s: Capacity changed old=%lld "
1779                                        "new=%lld\n", __func__, prev_lba,
1780                                        gli->max_lba);
1781                        break;
1782                case 0x3F: /* Report LUNs changed, Rescan. */
1783                        scsi_scan_host(cfg->host);
1784                        break;
1785                default:
1786                        rc = -EIO;
1787                        break;
1788                }
1789                break;
1790        default:
1791                rc = -EIO;
1792                break;
1793        }
1794out:
1795        dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
1796                sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
1797        return rc;
1798}
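
/*
 * For reference, scsi_normalize_sense() above digests both fixed and
 * descriptor format sense data; in the common fixed format (SPC), the
 * sense key sits in the low nibble of byte 2 and the ASC/ASCQ pair in
 * bytes 12 and 13. A capacity-change unit attention (the 0x2A case
 * handled above) would therefore arrive looking like:
 *
 *      u8 sense[DK_CXLFLASH_VERIFY_SENSE_LEN] = { 0 };
 *
 *      sense[0] = 0x70;                // fixed format, current error
 *      sense[2] = 0x06;                // sense key: UNIT ATTENTION
 *      sense[7] = 0x0a;                // additional sense length
 *      sense[12] = 0x2a;               // ASC: capacity/parameters changed
 *      sense[13] = 0x09;               // ASCQ: capacity data has changed
 */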
1799
1800/**
1801 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
1802 * @sdev:       SCSI device associated with LUN.
1803 * @verify:     Verify ioctl data structure.
1804 *
1805 * Return: 0 on success, -errno on failure
1806 */
1807static int cxlflash_disk_verify(struct scsi_device *sdev,
1808                                struct dk_cxlflash_verify *verify)
1809{
1810        int rc = 0;
1811        struct ctx_info *ctxi = NULL;
1812        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1813        struct device *dev = &cfg->dev->dev;
1814        struct llun_info *lli = sdev->hostdata;
1815        struct glun_info *gli = lli->parent;
1816        struct sisl_rht_entry *rhte = NULL;
1817        res_hndl_t rhndl = verify->rsrc_handle;
1818        u64 ctxid = DECODE_CTXID(verify->context_id),
1819            rctxid = verify->context_id;
1820        u64 last_lba = 0;
1821
1822        dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
1823                "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
1824                verify->hint, verify->hdr.flags);
1825
1826        ctxi = get_context(cfg, rctxid, lli, 0);
1827        if (unlikely(!ctxi)) {
1828                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1829                rc = -EINVAL;
1830                goto out;
1831        }
1832
1833        rhte = get_rhte(ctxi, rhndl, lli);
1834        if (unlikely(!rhte)) {
1835                dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
1836                        __func__, rhndl);
1837                rc = -EINVAL;
1838                goto out;
1839        }
1840
1841        /*
1842         * Look at the hint/sense to see if it requires us to redrive
1843         * inquiry (i.e. the Unit attention is due to the WWN changing).
1844         */
1845        if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
1846                /* Can't hold mutex across process_sense/read_cap16,
1847                 * since we could have an intervening EEH event.
1848                 */
1849                ctxi->unavail = true;
1850                mutex_unlock(&ctxi->mutex);
1851                rc = process_sense(sdev, verify);
1852                if (unlikely(rc)) {
1853                        dev_err(dev, "%s: Failed to validate sense data (%d)\n",
1854                                __func__, rc);
1855                        mutex_lock(&ctxi->mutex);
1856                        ctxi->unavail = false;
1857                        goto out;
1858                }
1859                mutex_lock(&ctxi->mutex);
1860                ctxi->unavail = false;
1861        }
1862
1863        switch (gli->mode) {
1864        case MODE_PHYSICAL:
1865                last_lba = gli->max_lba;
1866                break;
1867        case MODE_VIRTUAL:
1868                /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
1869                last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
1870                last_lba /= CXLFLASH_BLOCK_SIZE;
1871                last_lba--;
1872                break;
1873        default:
1874                WARN(1, "Unsupported LUN mode!");
1875        }
1876
1877        verify->last_lba = last_lba;
1878
1879out:
1880        if (likely(ctxi))
1881                put_context(ctxi);
1882        dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
1883                __func__, rc, verify->last_lba);
1884        return rc;
1885}
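
/*
 * Worked example of the MODE_VIRTUAL arithmetic above, with hypothetical
 * values: rhte->lxt_cnt = 2 translation entries, a chunk size of 256
 * blocks (MC_CHUNK_SIZE) and a 4K LUN block length span
 * 2 * 256 * 4096 bytes; divided by the 4K CXLFLASH_BLOCK_SIZE that is
 * 512 blocks, so the last_lba reported back to the user is 511.
 */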
1886
1887/**
1888 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
1889 * @cmd:        The ioctl command to decode.
1890 *
1891 * Return: A string identifying the decoded ioctl.
1892 */
1893static const char *decode_ioctl(int cmd)
1894{
1895        switch (cmd) {
1896        case DK_CXLFLASH_ATTACH:
1897                return __stringify_1(DK_CXLFLASH_ATTACH);
1898        case DK_CXLFLASH_USER_DIRECT:
1899                return __stringify_1(DK_CXLFLASH_USER_DIRECT);
1900        case DK_CXLFLASH_USER_VIRTUAL:
1901                return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
1902        case DK_CXLFLASH_VLUN_RESIZE:
1903                return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
1904        case DK_CXLFLASH_RELEASE:
1905                return __stringify_1(DK_CXLFLASH_RELEASE);
1906        case DK_CXLFLASH_DETACH:
1907                return __stringify_1(DK_CXLFLASH_DETACH);
1908        case DK_CXLFLASH_VERIFY:
1909                return __stringify_1(DK_CXLFLASH_VERIFY);
1910        case DK_CXLFLASH_VLUN_CLONE:
1911                return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
1912        case DK_CXLFLASH_RECOVER_AFU:
1913                return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
1914        case DK_CXLFLASH_MANAGE_LUN:
1915                return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
1916        }
1917
1918        return "UNKNOWN";
1919}
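
/*
 * __stringify_1() is used above (rather than __stringify()) so that the
 * argument is stringified without macro expansion; callers therefore get
 * the ioctl's name back, not its _IOC() encoding:
 *
 *      decode_ioctl(DK_CXLFLASH_ATTACH);       // yields "DK_CXLFLASH_ATTACH"
 */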
1920
1921/**
1922 * cxlflash_disk_direct_open() - opens a direct (physical) disk
1923 * @sdev:       SCSI device associated with LUN.
1924 * @arg:        UDirect ioctl data structure.
1925 *
1926 * On successful return, the user is informed of the resource handle
1927 * to be used to identify the direct lun and the size (in blocks) of
1928 * the direct lun in last LBA format.
1929 *
1930 * Return: 0 on success, -errno on failure
1931 */
1932static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1933{
1934        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1935        struct device *dev = &cfg->dev->dev;
1936        struct afu *afu = cfg->afu;
1937        struct llun_info *lli = sdev->hostdata;
1938        struct glun_info *gli = lli->parent;
1939
1940        struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
1941
1942        u64 ctxid = DECODE_CTXID(pphys->context_id),
1943            rctxid = pphys->context_id;
1944        u64 lun_size = 0;
1945        u64 last_lba = 0;
1946        u64 rsrc_handle = -1;
1947        u32 port = CHAN2PORT(sdev->channel);
1948
1949        int rc = 0;
1950
1951        struct ctx_info *ctxi = NULL;
1952        struct sisl_rht_entry *rhte = NULL;
1953
1954        dev_dbg(dev, "%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
1955
1956        rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1957        if (unlikely(rc)) {
1958                dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
1959                        __func__);
1960                goto out;
1961        }
1962
1963        ctxi = get_context(cfg, rctxid, lli, 0);
1964        if (unlikely(!ctxi)) {
1965                dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1966                rc = -EINVAL;
1967                goto err1;
1968        }
1969
1970        rhte = rhte_checkout(ctxi, lli);
1971        if (unlikely(!rhte)) {
1972                dev_dbg(dev, "%s: too many opens for this context\n", __func__);
1973                rc = -EMFILE;   /* too many opens  */
1974                goto err1;
1975        }
1976
1977        rsrc_handle = (rhte - ctxi->rht_start);
1978
1979        rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
1980        cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
1981
1982        last_lba = gli->max_lba;
1983        pphys->hdr.return_flags = 0;
1984        pphys->last_lba = last_lba;
1985        pphys->rsrc_handle = rsrc_handle;
1986
1987out:
1988        if (likely(ctxi))
1989                put_context(ctxi);
1990        dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
1991                __func__, rsrc_handle, rc, last_lba);
1992        return rc;
1993
1994err1:
1995        cxlflash_lun_detach(gli);
1996        goto out;
1997}
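
/*
 * Illustrative user-space sketch (not part of this driver) of opening a
 * physical LUN once attached; field names come from
 * uapi/scsi/cxlflash_ioctl.h.
 *
 *      struct dk_cxlflash_udirect udirect;
 *
 *      memset(&udirect, 0, sizeof(udirect));
 *      udirect.hdr.version = DK_CXLFLASH_VERSION_0;
 *      udirect.context_id = ctx_id;    // handed back by DK_CXLFLASH_ATTACH
 *
 *      if (ioctl(fd, DK_CXLFLASH_USER_DIRECT, &udirect) == 0) {
 *              // udirect.rsrc_handle identifies the LUN to the AFU;
 *              // udirect.last_lba is its capacity in last-LBA form
 *      }
 */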
1998
1999/**
2000 * ioctl_common() - common IOCTL handler for driver
2001 * @sdev:       SCSI device associated with LUN.
2002 * @cmd:        IOCTL command.
2003 *
2004 * Handles common fencing operations that are valid for multiple ioctls. Always
2005 * allow through ioctls that are cleanup oriented in nature, even when operating
2006 * in a failed/terminating state.
2007 *
2008 * Return: 0 on success, -errno on failure
2009 */
2010static int ioctl_common(struct scsi_device *sdev, int cmd)
2011{
2012        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
2013        struct device *dev = &cfg->dev->dev;
2014        struct llun_info *lli = sdev->hostdata;
2015        int rc = 0;
2016
2017        if (unlikely(!lli)) {
2018                dev_dbg(dev, "%s: Unknown LUN\n", __func__);
2019                rc = -EINVAL;
2020                goto out;
2021        }
2022
2023        rc = check_state(cfg);
2024        if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
2025                switch (cmd) {
2026                case DK_CXLFLASH_VLUN_RESIZE:
2027                case DK_CXLFLASH_RELEASE:
2028                case DK_CXLFLASH_DETACH:
2029                        dev_dbg(dev, "%s: Command override! (%d)\n",
2030                                __func__, rc);
2031                        rc = 0;
2032                        break;
2033                }
2034        }
2035out:
2036        return rc;
2037}
2038
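/*
 * The lookup in cxlflash_ioctl() below relies on the DK_CXLFLASH_*
 * commands having consecutive _IOC_NR values in the same order as
 * ioctl_tbl[] (hence the "order matters" note on the table). Assuming,
 * as the table order suggests, that DK_CXLFLASH_VLUN_CLONE carries the
 * highest command number, a build-time guard along these lines (a
 * sketch, not present in this file) would catch a reordered table:
 *
 *      BUILD_BUG_ON(_IOC_NR(DK_CXLFLASH_VLUN_CLONE) -
 *                   _IOC_NR(DK_CXLFLASH_ATTACH) + 1 !=
 *                   ARRAY_SIZE(ioctl_tbl));
 */
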
2039/**
2040 * cxlflash_ioctl() - IOCTL handler for driver
2041 * @sdev:       SCSI device associated with LUN.
2042 * @cmd:        IOCTL command.
2043 * @arg:        Userspace ioctl data structure.
2044 *
2045 * A read/write semaphore is used to implement a 'drain' of currently
2046 * running ioctls. The read semaphore is taken at the beginning of each
2047 * ioctl thread and released upon concluding execution. Additionally the
2048 * semaphore should be released and then reacquired in any ioctl execution
2049 * path which will wait for an event to occur that is outside the scope of
2050 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
2051 * a thread simply needs to acquire the write semaphore.
2052 *
2053 * Return: 0 on success, -errno on failure
2054 */
2055int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
2056{
2057        typedef int (*sioctl) (struct scsi_device *, void *);
2058
2059        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
2060        struct device *dev = &cfg->dev->dev;
2061        struct afu *afu = cfg->afu;
2062        struct dk_cxlflash_hdr *hdr;
2063        char buf[sizeof(union cxlflash_ioctls)];
2064        size_t size = 0;
2065        bool known_ioctl = false;
2066        int idx;
2067        int rc = 0;
2068        struct Scsi_Host *shost = sdev->host;
2069        sioctl do_ioctl = NULL;
2070
2071        static const struct {
2072                size_t size;
2073                sioctl ioctl;
2074        } ioctl_tbl[] = {       /* NOTE: order matters here */
2075        {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
2076        {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
2077        {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
2078        {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
2079        {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
2080        {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
2081        {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
2082        {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
2083        {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
2084        {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
2085        };
2086
2087        /* Hold read semaphore so we can drain if needed */
2088        down_read(&cfg->ioctl_rwsem);
2089
2090        /* Restrict command set to physical support only for internal LUN */
2091        if (afu->internal_lun)
2092                switch (cmd) {
2093                case DK_CXLFLASH_RELEASE:
2094                case DK_CXLFLASH_USER_VIRTUAL:
2095                case DK_CXLFLASH_VLUN_RESIZE:
2096                case DK_CXLFLASH_VLUN_CLONE:
2097                        dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
2098                                __func__, decode_ioctl(cmd), afu->internal_lun);
2099                        rc = -EINVAL;
2100                        goto cxlflash_ioctl_exit;
2101                }
2102
2103        switch (cmd) {
2104        case DK_CXLFLASH_ATTACH:
2105        case DK_CXLFLASH_USER_DIRECT:
2106        case DK_CXLFLASH_RELEASE:
2107        case DK_CXLFLASH_DETACH:
2108        case DK_CXLFLASH_VERIFY:
2109        case DK_CXLFLASH_RECOVER_AFU:
2110        case DK_CXLFLASH_USER_VIRTUAL:
2111        case DK_CXLFLASH_VLUN_RESIZE:
2112        case DK_CXLFLASH_VLUN_CLONE:
2113                dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
2114                        __func__, decode_ioctl(cmd), cmd, shost->host_no,
2115                        sdev->channel, sdev->id, sdev->lun);
2116                rc = ioctl_common(sdev, cmd);
2117                if (unlikely(rc))
2118                        goto cxlflash_ioctl_exit;
2119
2120                /* fall through */
2121
2122        case DK_CXLFLASH_MANAGE_LUN:
2123                known_ioctl = true;
2124                idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
2125                size = ioctl_tbl[idx].size;
2126                do_ioctl = ioctl_tbl[idx].ioctl;
2127
2128                if (likely(do_ioctl))
2129                        break;
2130
2131                /* fall through */
2132        default:
2133                rc = -EINVAL;
2134                goto cxlflash_ioctl_exit;
2135        }
2136
2137        if (unlikely(copy_from_user(&buf, arg, size))) {
2138                dev_err(dev, "%s: copy_from_user() fail! "
2139                        "size=%lu cmd=%d (%s) arg=%p\n",
2140                        __func__, size, cmd, decode_ioctl(cmd), arg);
2141                rc = -EFAULT;
2142                goto cxlflash_ioctl_exit;
2143        }
2144
2145        hdr = (struct dk_cxlflash_hdr *)&buf;
2146        if (hdr->version != DK_CXLFLASH_VERSION_0) {
2147                dev_dbg(dev, "%s: Version %u not supported for %s\n",
2148                        __func__, hdr->version, decode_ioctl(cmd));
2149                rc = -EINVAL;
2150                goto cxlflash_ioctl_exit;
2151        }
2152
2153        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
2154                dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
2155                rc = -EINVAL;
2156                goto cxlflash_ioctl_exit;
2157        }
2158
2159        rc = do_ioctl(sdev, (void *)&buf);
2160        if (likely(!rc))
2161                if (unlikely(copy_to_user(arg, &buf, size))) {
2162                        dev_err(dev, "%s: copy_to_user() fail! "
2163                                "size=%lu cmd=%d (%s) arg=%p\n",
2164                                __func__, size, cmd, decode_ioctl(cmd), arg);
2165                        rc = -EFAULT;
2166                }
2167
2168        /* fall through to exit */
2169
2170cxlflash_ioctl_exit:
2171        up_read(&cfg->ioctl_rwsem);
2172        if (unlikely(rc && known_ioctl))
2173                dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2174                        "returned rc %d\n", __func__,
2175                        decode_ioctl(cmd), cmd, shost->host_no,
2176                        sdev->channel, sdev->id, sdev->lun, rc);
2177        else
2178                dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2179                        "returned rc %d\n", __func__, decode_ioctl(cmd),
2180                        cmd, shost->host_no, sdev->channel, sdev->id,
2181                        sdev->lun, rc);
2182        return rc;
2183}
2184
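/*
 * Draining, as described in the prolog of cxlflash_ioctl(): a thread that
 * needs all in-flight ioctls to finish (such as reset handling elsewhere
 * in the driver) simply takes the semaphore for write; a sketch:
 *
 *      down_write(&cfg->ioctl_rwsem);  // blocks until all readers are out
 *      ...perform the disruptive operation...
 *      up_write(&cfg->ioctl_rwsem);
 */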