linux/drivers/misc/cxl/api.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context  *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return ERR_CAST(afu);

	ctx = cxl_context_alloc();
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
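
/*
 * Example (illustrative sketch only, not part of this driver): a kernel AFU
 * driver built on this API would typically pair cxl_dev_context_init() with
 * cxl_release_context(). "pdev" is assumed to be the AFU's struct pci_dev,
 * e.g. from the caller's probe routine:
 *
 *	struct cxl_context *ctx;
 *
 *	ctx = cxl_dev_context_init(pdev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	// ... allocate IRQs, start the context, do work ...
 *
 *	// Only valid while the context is not STARTED (or after stopping it);
 *	// otherwise this returns -EBUSY:
 *	cxl_release_context(ctx);
 */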

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

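/*
 * Translate an AFU interrupt number relative to this context into the
 * hardware IRQ number by walking the context's allocated IRQ ranges.
 * Returns 0 if "num" does not fall within any allocated range.
 */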
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	return 0;
}

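/*
 * Walk the AFU interrupts backing a device's MSIs: start with the default
 * context, hand out its AFU IRQs one by one, and hop to the next context on
 * the extra_irq_contexts list once the per-process limit is reached.
 * Returns the hardware IRQ number for the current position.
 */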
int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
	if (*ctx == NULL || *afu_irq == 0) {
		*afu_irq = 1;
		*ctx = cxl_get_context(pdev);
	} else {
		(*afu_irq)++;
		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
			*ctx = list_next_entry(*ctx, extra_irq_contexts);
			*afu_irq = 1;
		}
	}
	return cxl_find_afu_irq(*ctx, *afu_irq);
}
/* Exported via cxl_base */

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;

	ctx->priv = priv;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
	if (!ctx)
		return ERR_PTR(-EINVAL);

	return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/*
		 * In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler.
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else
			WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
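
/*
 * Example (illustrative sketch only): a typical allocate/map/free sequence
 * for AFU interrupts from a kernel AFU driver. "my_irq_handler" and "cookie"
 * are hypothetical and error handling is simplified; AFU interrupts are
 * numbered from 1 here, with 0 used for the PSL interrupt (see
 * cxl_allocate_afu_irqs() above):
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 4);
 *	if (rc)
 *		return rc;
 *
 *	rc = cxl_map_afu_irq(ctx, 1, my_irq_handler, cookie, "my_afu_irq1");
 *	if (rc < 0)
 *		goto err_free;
 *
 *	// ... on teardown ...
 *	cxl_unmap_afu_irq(ctx, 1, cookie);
 *	cxl_free_afu_irqs(ctx);
 */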

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc)
		goto out;

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
		ctx->real_mode = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		cxl_adapter_context_put(ctx->afu->adapter);
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
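
/*
 * Example (illustrative sketch only): starting a context from a kernel AFU
 * driver. Passing a NULL task attaches the context as a kernel context; the
 * work element descriptor value "wed" is AFU specific and hypothetical here:
 *
 *	rc = cxl_start_context(ctx, wed, NULL);
 *	if (rc)
 *		return rc;
 *
 *	// ... AFU processes work ...
 *
 *	rc = cxl_stop_context(ctx);	// 0 on success, -errno otherwise
 */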

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
	if (ctx->status == STARTED) {
		/*
		 * We could potentially update the PE and issue an update LLCMD
		 * to support this, but it doesn't seem to have a good use case
		 * since it's trivial to just create a second kernel context
		 * with different translation modes, so until someone convinces
		 * me otherwise:
		 */
		return -EBUSY;
	}

	ctx->real_mode = real_mode;
	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Care is needed to keep this reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
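
/*
 * Example (illustrative sketch only): exporting a context to userspace from a
 * kernel AFU driver. The driver may override individual file operations (any
 * left NULL are patched to the defaults above); "my_afu_fops" is hypothetical
 * and fd_install() publishes the fd only once setup has succeeded:
 *
 *	struct file *file;
 *	int fd;
 *
 *	file = cxl_get_fd(ctx, &my_afu_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		goto err;	// both NULL and ERR_PTR indicate failure here
 *
 *	// ... hand the fd back to userspace (e.g. via an ioctl reply) ...
 *	fd_install(fd, file);
 */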

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
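
/*
 * Example (illustrative sketch only): how an AFU driver plugs into the event
 * delivery path. "my_fetch_event" and "my_event_delivered" are hypothetical
 * callbacks implementing the mandatory hooks checked in cxl_set_driver_ops()
 * above; cxl_context_events_pending() is then called from the driver's own
 * event source to wake up readers of the context's fd:
 *
 *	static struct cxl_afu_driver_ops my_afu_driver_ops = {
 *		.fetch_event	 = my_fetch_event,
 *		.event_delivered = my_event_delivered,
 *	};
 *
 *	cxl_set_driver_ops(ctx, &my_afu_driver_ops);
 *	...
 *	cxl_context_events_pending(ctx, 1);	// one new event queued
 */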

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys: %llx size: %llx\n",
		__func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
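
/*
 * Example (illustrative sketch only): mapping the context's problem state
 * area once the context has been started, and releasing it on teardown. The
 * -EIO return value is hypothetical:
 *
 *	void __iomem *psa;
 *
 *	psa = cxl_psa_map(ctx);		// NULL unless ctx->status == STARTED
 *	if (!psa)
 *		return -EIO;
 *
 *	// ... MMIO accesses to the AFU's per-process problem state area ...
 *
 *	cxl_psa_unmap(psa);
 */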

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return -ENODEV;

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);

int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return -ENODEV;

	if (irqs > afu->adapter->user_irqs)
		return -EINVAL;

	/* Limit user_irqs to prevent the user increasing this via sysfs */
	afu->adapter->user_irqs = irqs;
	afu->irqs_max = irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);

int cxl_get_max_irqs_per_process(struct pci_dev *dev)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	if (IS_ERR(afu))
		return -ENODEV;

	return afu->irqs_max;
}
EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);

/*
 * This is a special interrupt allocation routine called from the PHB's MSI
 * setup function. When capi interrupts are allocated in this manner they must
 * still be associated with a running context, but since the MSI APIs have no
 * way to specify this we use the default context associated with the device.
 *
 * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
 * interrupt number, so in order to overcome this their driver informs us of
 * the restriction by setting the maximum interrupts per context, and we
 * allocate additional contexts as necessary so that we can keep the AFU
 * interrupt number within the supported range.
 */
int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct cxl_context *ctx, *new_ctx, *default_ctx;
	int remaining;
	int rc;

	ctx = default_ctx = cxl_get_context(pdev);
	if (WARN_ON(!default_ctx))
		return -ENODEV;

	remaining = nvec;
	while (remaining > 0) {
		rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
		if (rc) {
			pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
			return rc;
		}
		remaining -= ctx->afu->irqs_max;

		if (ctx != default_ctx && default_ctx->status == STARTED) {
			WARN_ON(cxl_start_context(ctx,
				be64_to_cpu(default_ctx->elem->common.wed),
				NULL));
		}

		if (remaining > 0) {
			new_ctx = cxl_dev_context_init(pdev);
			/* cxl_dev_context_init() returns an ERR_PTR, never NULL */
			if (IS_ERR(new_ctx)) {
				pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
				return -ENOSPC;
			}
			list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
			ctx = new_ctx;
		}
	}

	return 0;
}
/* Exported via cxl_base */

void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct cxl_context *ctx, *pos, *tmp;

	ctx = cxl_get_context(pdev);
	if (WARN_ON(!ctx))
		return;

	cxl_free_afu_irqs(ctx);
	list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
		cxl_stop_context(pos);
		cxl_free_afu_irqs(pos);
		list_del(&pos->extra_irq_contexts);
		cxl_release_context(pos);
	}
}
/* Exported via cxl_base */