linux/drivers/misc/ocxl/context.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

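/*
 * Allocate an ocxl context and reserve a PASID for it on the AFU.
 * Takes a reference on the AFU, released in ocxl_context_free().
 * context:     set to the newly allocated context on success
 * afu:         the AFU the context belongs to
 * mapping:     address_space backing the context's mmaps, used to
 *              tear down mappings on a forced detach
 */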
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
                struct address_space *mapping)
{
        int pasid;
        struct ocxl_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->afu = afu;
        mutex_lock(&afu->contexts_lock);
        pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
                        afu->pasid_base + afu->pasid_max, GFP_KERNEL);
        if (pasid < 0) {
                mutex_unlock(&afu->contexts_lock);
                kfree(ctx);
                return pasid;
        }
        afu->pasid_count++;
        mutex_unlock(&afu->contexts_lock);

        ctx->pasid = pasid;
        ctx->status = OPENED;
        mutex_init(&ctx->status_mutex);
        ctx->mapping = mapping;
        mutex_init(&ctx->mapping_lock);
        init_waitqueue_head(&ctx->events_wq);
        mutex_init(&ctx->xsl_error_lock);
        mutex_init(&ctx->irq_lock);
        idr_init(&ctx->irq_idr);
        ctx->tidr = 0;

        /*
         * Keep a reference on the AFU to make sure it's valid for the
         * duration of the life of the context
         */
        ocxl_afu_get(afu);
        *context = ctx;
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);

/*
 * Callback for when a translation fault triggers an error
 * data:        a pointer to the context which triggered the fault
 * addr:        the address that triggered the error
 * dsisr:       the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
        struct ocxl_context *ctx = (struct ocxl_context *) data;

        mutex_lock(&ctx->xsl_error_lock);
        ctx->xsl_error.addr = addr;
        ctx->xsl_error.dsisr = dsisr;
        ctx->xsl_error.count++;
        mutex_unlock(&ctx->xsl_error_lock);

        wake_up_all(&ctx->events_wq);
}

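/*
 * Attach a context: add a Process Element for it on the link so the
 * AFU can access the process address space.
 * amr:         value of the Authority Mask Register to apply
 * mm:          mm of the attaching process (may be NULL)
 */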
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
        int rc;
        unsigned long pidr = 0;
        struct pci_dev *dev;

        // Locks both status & tidr
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        if (mm)
                pidr = mm->context.id;

        dev = to_pci_dev(ctx->afu->fn->dev.parent);
        rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
                              amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
        if (rc)
                goto out;

        ctx->status = ATTACHED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);

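/* Map the trigger page of an AFU interrupt at the faulting address */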
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 trigger_addr;
        int irq_id = ocxl_irq_offset_to_id(ctx, offset);

        trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
        if (!trigger_addr)
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

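/*
 * Map a page of the context's per-process MMIO area. Requires the
 * context to be attached, as the area is indexed by PASID.
 */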
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 pp_mmio_addr;
        int pasid_off;
        vm_fault_t ret;

        if (offset >= ctx->afu->config.pp_mmio_stride)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != ATTACHED) {
                mutex_unlock(&ctx->status_mutex);
                pr_debug("%s: Context not attached, failing mmio mmap\n",
                        __func__);
                return VM_FAULT_SIGBUS;
        }

        pasid_off = ctx->pasid - ctx->afu->pasid_base;
        pp_mmio_addr = ctx->afu->pp_mmio_start +
                pasid_off * ctx->afu->config.pp_mmio_stride +
                offset;

        ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
        mutex_unlock(&ctx->status_mutex);
        return ret;
}

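/*
 * Page fault handler for the context mmap: offsets below the AFU's
 * irq_base_offset belong to the per-process MMIO area, higher
 * offsets to AFU interrupt trigger pages.
 */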
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxl_context *ctx = vma->vm_file->private_data;
        u64 offset;
        vm_fault_t ret;

        offset = vmf->pgoff << PAGE_SHIFT;
        pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
                ctx->pasid, vmf->address, offset);

        if (offset < ctx->afu->irq_base_offset)
                ret = map_pp_mmio(vma, vmf->address, offset, ctx);
        else
                ret = map_afu_irq(vma, vmf->address, offset, ctx);
        return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
        .fault = ocxl_mmap_fault,
};

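/* Validate a request to mmap an AFU interrupt trigger page */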
static int check_mmap_afu_irq(struct ocxl_context *ctx,
                        struct vm_area_struct *vma)
{
        int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

        /* only one page */
        if (vma_pages(vma) != 1)
                return -EINVAL;

        /* check offset validity */
        if (!ocxl_afu_irq_get_addr(ctx, irq_id))
                return -EINVAL;

        /*
         * trigger page should only be accessible in write mode.
         *
         * It's a bit theoretical, as a page mmapped with only
         * PROT_WRITE is currently readable, but it doesn't hurt.
         */
        if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
                !(vma->vm_flags & VM_WRITE))
                return -EINVAL;
        vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
        return 0;
}

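/* Validate a request to mmap the per-process MMIO area */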
static int check_mmap_mmio(struct ocxl_context *ctx,
                        struct vm_area_struct *vma)
{
        if ((vma_pages(vma) + vma->vm_pgoff) >
                (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
                return -EINVAL;
        return 0;
}

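/*
 * mmap handler for a context. The offset selects either the
 * per-process MMIO area or an AFU interrupt trigger page; pages are
 * inserted lazily from the fault handler.
 */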
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
        int rc;

        if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
                rc = check_mmap_mmio(ctx, vma);
        else
                rc = check_mmap_afu_irq(ctx, vma);
        if (rc)
                return rc;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
}

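/*
 * Detach a context: ask the AFU to terminate the PASID, then remove
 * its Process Element. Returns -EBUSY if the termination times out,
 * in which case the caller must not free the context (see below).
 */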
int ocxl_context_detach(struct ocxl_context *ctx)
{
        struct pci_dev *dev;
        int afu_control_pos;
        enum ocxl_context_status status;
        int rc;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != ATTACHED)
                return 0;

        dev = to_pci_dev(ctx->afu->fn->dev.parent);
        afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

        mutex_lock(&ctx->afu->afu_control_lock);
        rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
        mutex_unlock(&ctx->afu->afu_control_lock);
        trace_ocxl_terminate_pasid(ctx->pasid, rc);
        if (rc) {
                /*
                 * If we time out waiting for the AFU to terminate the
                 * PASID, then it's dangerous to clean up the Process
                 * Element entry in the SPA, as it may be referenced
                 * in the future by the AFU. In which case, we would
                 * checkstop because of an invalid PE access (FIR
                 * register 2, bit 42). So leave the PE
                 * defined. Caller shouldn't free the context so that
                 * the PASID remains allocated.
                 *
                 * A link reset will be required to clean up the AFU
                 * and the SPA.
                 */
                if (rc == -EBUSY)
                        return rc;
        }
        rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
        if (rc) {
                dev_warn(&dev->dev,
                        "Couldn't remove PE entry cleanly: %d\n", rc);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

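/*
 * Detach all contexts of an AFU and tear down their mmio mappings,
 * typically when the function is removed or the driver unbound while
 * still in use.
 */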
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
        struct ocxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                ocxl_context_detach(ctx);
                /*
                 * We are force detaching - remove any active mmio
                 * mappings so userspace cannot interfere with the
                 * card if it comes back.  Easiest way to exercise
                 * this is to unbind and rebind the driver via sysfs
                 * while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}

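/*
 * Free a context: release its PASID and AFU interrupts, and drop the
 * reference on the AFU taken in ocxl_context_alloc(). Must not be
 * called if ocxl_context_detach() returned -EBUSY.
 */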
void ocxl_context_free(struct ocxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        ctx->afu->pasid_count--;
        idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
        mutex_unlock(&ctx->afu->contexts_lock);

        ocxl_afu_irq_free_all(ctx);
        idr_destroy(&ctx->irq_idr);
        /* reference to the AFU taken in ocxl_context_alloc() */
        ocxl_afu_put(ctx->afu);
        kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);