linux/drivers/misc/ocxl/context.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "trace.h"
#include "ocxl_internal.h"

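/*
 * Allocate a context for the given AFU and assign it a PASID from the
 * AFU's range. On success, a reference is taken on the AFU; it is
 * dropped when the context is freed in ocxl_context_free().
 */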
int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
                struct address_space *mapping)
{
        int pasid;
        struct ocxl_context *ctx;

        *context = kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
        if (!*context)
                return -ENOMEM;

        ctx = *context;

        ctx->afu = afu;
        mutex_lock(&afu->contexts_lock);
        pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
                        afu->pasid_base + afu->pasid_max, GFP_KERNEL);
        if (pasid < 0) {
                mutex_unlock(&afu->contexts_lock);
                /* not visible to anyone yet, safe to free directly */
                kfree(ctx);
                return pasid;
        }
        afu->pasid_count++;
        mutex_unlock(&afu->contexts_lock);

        ctx->pasid = pasid;
        ctx->status = OPENED;
        mutex_init(&ctx->status_mutex);
        ctx->mapping = mapping;
        mutex_init(&ctx->mapping_lock);
        init_waitqueue_head(&ctx->events_wq);
        mutex_init(&ctx->xsl_error_lock);
        mutex_init(&ctx->irq_lock);
        idr_init(&ctx->irq_idr);
        ctx->tidr = 0;

        /*
         * Keep a reference on the AFU to make sure it's valid for the
         * duration of the life of the context
         */
        ocxl_afu_get(afu);
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_alloc);
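
/*
 * A minimal usage sketch (hypothetical driver code; assumes an AFU and
 * a file mapping are already at hand, as in the ocxl file layer):
 *
 *	struct ocxl_context *ctx;
 *	int rc = ocxl_context_alloc(&ctx, afu, mapping);
 *
 *	if (!rc)
 *		rc = ocxl_context_attach(ctx, amr, current->mm);
 */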

/*
 * Callback for when a translation fault triggers an error
 * data:        a pointer to the context which triggered the fault
 * addr:        the address that triggered the error
 * dsisr:       the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
        struct ocxl_context *ctx = (struct ocxl_context *) data;

        mutex_lock(&ctx->xsl_error_lock);
        ctx->xsl_error.addr = addr;
        ctx->xsl_error.dsisr = dsisr;
        ctx->xsl_error.count++;
        mutex_unlock(&ctx->xsl_error_lock);

        wake_up_all(&ctx->events_wq);
}

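/*
 * Attach the context to an address space by adding a Process Element
 * for its PASID to the link's SPA. The PID is taken from the mm (if
 * any) and the TID from ctx->tidr.
 */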
int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
        int rc;
        unsigned long pidr = 0;

        // Locks both status & tidr
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        if (mm)
                pidr = mm->context.id;

        rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
                              amr, mm, xsl_fault_error, ctx);
        if (rc)
                goto out;

        ctx->status = ATTACHED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(ocxl_context_attach);

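/* Map the page that triggers the given AFU interrupt when written to */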
static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 trigger_addr;
        int irq_id = ocxl_irq_offset_to_id(ctx, offset);

        trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
        if (!trigger_addr)
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT);
}

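/*
 * Map one page of the per-process MMIO area. Each context owns a
 * stride-sized window starting at pp_mmio_start, indexed by the
 * offset of its PASID within the AFU's PASID range.
 */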
static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 pp_mmio_addr;
        int pasid_off;
        vm_fault_t ret;

        if (offset >= ctx->afu->config.pp_mmio_stride)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != ATTACHED) {
                mutex_unlock(&ctx->status_mutex);
                pr_debug("%s: Context not attached, failing mmio mmap\n",
                        __func__);
                return VM_FAULT_SIGBUS;
        }

        pasid_off = ctx->pasid - ctx->afu->pasid_base;
        pp_mmio_addr = ctx->afu->pp_mmio_start +
                pasid_off * ctx->afu->config.pp_mmio_stride +
                offset;

        ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
        mutex_unlock(&ctx->status_mutex);
        return ret;
}

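/*
 * The mmap layout puts the per-process MMIO area first and the AFU
 * interrupt trigger pages after it; irq_base_offset is the boundary.
 */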
static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxl_context *ctx = vma->vm_file->private_data;
        u64 offset;
        vm_fault_t ret;

        offset = vmf->pgoff << PAGE_SHIFT;
        pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
                ctx->pasid, vmf->address, offset);

        if (offset < ctx->afu->irq_base_offset)
                ret = map_pp_mmio(vma, vmf->address, offset, ctx);
        else
                ret = map_afu_irq(vma, vmf->address, offset, ctx);
        return ret;
}

static const struct vm_operations_struct ocxl_vmops = {
        .fault = ocxl_mmap_fault,
};

static int check_mmap_afu_irq(struct ocxl_context *ctx,
                        struct vm_area_struct *vma)
{
        int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT);

        /* only one page */
        if (vma_pages(vma) != 1)
                return -EINVAL;

        /* check offset validity */
        if (!ocxl_afu_irq_get_addr(ctx, irq_id))
                return -EINVAL;

        /*
         * The trigger page should only be accessible in write mode.
         *
         * It's a bit theoretical, as a page mmapped with only
         * PROT_WRITE is currently readable, but it doesn't hurt.
         */
        if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
                !(vma->vm_flags & VM_WRITE))
                return -EINVAL;
        vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
        return 0;
}

static int check_mmap_mmio(struct ocxl_context *ctx,
                        struct vm_area_struct *vma)
{
        if ((vma_pages(vma) + vma->vm_pgoff) >
                (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
                return -EINVAL;
        return 0;
}

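/*
 * Validate and set up an mmap of either the per-process MMIO window
 * or an interrupt trigger page. PFNs are inserted lazily, from the
 * fault handler above.
 */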
int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
        int rc;

        if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset)
                rc = check_mmap_mmio(ctx, vma);
        else
                rc = check_mmap_afu_irq(ctx, vma);
        if (rc)
                return rc;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
}

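/*
 * From userspace, a sketch (hypothetical values; assumes fd is an open
 * ocxl AFU device and irq_offset points above irq_base_offset): the
 * trigger page must be mapped write-only, per check_mmap_afu_irq():
 *
 *	void *p = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *		       fd, irq_offset);
 */

/*
 * Detach the context: ask the AFU to terminate its PASID, then remove
 * the Process Element from the SPA. Returns -EBUSY if the AFU did not
 * terminate the PASID in time; see the comment below for why the PE
 * (and thus the PASID) must then be left alone.
 */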
int ocxl_context_detach(struct ocxl_context *ctx)
{
        struct pci_dev *dev;
        int afu_control_pos;
        enum ocxl_context_status status;
        int rc;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != ATTACHED)
                return 0;

        dev = to_pci_dev(ctx->afu->fn->dev.parent);
        afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

        mutex_lock(&ctx->afu->afu_control_lock);
        rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
        mutex_unlock(&ctx->afu->afu_control_lock);
        trace_ocxl_terminate_pasid(ctx->pasid, rc);
        if (rc) {
                /*
                 * If we time out waiting for the AFU to terminate the
                 * PASID, it's dangerous to clean up the Process
                 * Element entry in the SPA, as it may still be
                 * referenced by the AFU later, in which case we would
                 * checkstop because of an invalid PE access (FIR
                 * register 2, bit 42). So leave the PE defined. The
                 * caller shouldn't free the context, so the PASID
                 * remains allocated.
                 *
                 * A link reset will be required to clean up the AFU
                 * and the SPA.
                 */
                if (rc == -EBUSY)
                        return rc;
        }
        rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
        if (rc) {
                dev_warn(&dev->dev,
                        "Couldn't remove PE entry cleanly: %d\n", rc);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ocxl_context_detach);

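/* Force-detach every context of an AFU, e.g. when its driver is unbound */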
void ocxl_context_detach_all(struct ocxl_afu *afu)
{
        struct ocxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                ocxl_context_detach(ctx);
                /*
                 * We are force detaching - remove any active mmio
                 * mappings so userspace cannot interfere with the
                 * card if it comes back. Easiest way to exercise
                 * this is to unbind and rebind the driver via sysfs
                 * while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}

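/*
 * Free the context: release its PASID, free its AFU interrupts and
 * drop the AFU reference taken in ocxl_context_alloc().
 */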
void ocxl_context_free(struct ocxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        ctx->afu->pasid_count--;
        idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
        mutex_unlock(&ctx->afu->contexts_lock);

        ocxl_afu_irq_free_all(ctx);
        idr_destroy(&ctx->irq_idr);
        /* reference to the AFU taken in ocxl_context_alloc() */
        ocxl_afu_put(ctx->afu);
        kfree(ctx);
}
EXPORT_SYMBOL_GPL(ocxl_context_free);