/* linux/drivers/misc/uacce/uacce.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2#include <linux/compat.h>
   3#include <linux/dma-mapping.h>
   4#include <linux/iommu.h>
   5#include <linux/module.h>
   6#include <linux/poll.h>
   7#include <linux/slab.h>
   8#include <linux/uacce.h>
   9
  10static struct class *uacce_class;
  11static dev_t uacce_devt;
  12static DEFINE_MUTEX(uacce_mutex);
  13static DEFINE_XARRAY_ALLOC(uacce_xa);
  14
  15static int uacce_start_queue(struct uacce_queue *q)
  16{
  17        int ret = 0;
  18
  19        mutex_lock(&uacce_mutex);
  20
  21        if (q->state != UACCE_Q_INIT) {
  22                ret = -EINVAL;
  23                goto out_with_lock;
  24        }
  25
  26        if (q->uacce->ops->start_queue) {
  27                ret = q->uacce->ops->start_queue(q);
  28                if (ret < 0)
  29                        goto out_with_lock;
  30        }
  31
  32        q->state = UACCE_Q_STARTED;
  33
  34out_with_lock:
  35        mutex_unlock(&uacce_mutex);
  36
  37        return ret;
  38}
  39
/*
 * uacce_put_queue() - tear down a queue and mark it a zombie.
 * @q: the queue to release
 *
 * Stops a running queue and releases the driver-side resources, then
 * moves the queue to UACCE_Q_ZOMBIE so repeated calls (e.g. an explicit
 * UACCE_CMD_PUT_Q ioctl followed by release()) are no-ops.
 * Serialized by uacce_mutex. Always returns 0.
 */
static int uacce_put_queue(struct uacce_queue *q)
{
        struct uacce_device *uacce = q->uacce;

        mutex_lock(&uacce_mutex);

        /* Already torn down; nothing left to do. */
        if (q->state == UACCE_Q_ZOMBIE)
                goto out;

        /* A started queue must be stopped before resources go away. */
        if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
                uacce->ops->stop_queue(q);

        /* Release driver resources for any live (INIT or STARTED) queue. */
        if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
             uacce->ops->put_queue)
                uacce->ops->put_queue(q);

        q->state = UACCE_Q_ZOMBIE;
out:
        mutex_unlock(&uacce_mutex);

        return 0;
}
  62
  63static long uacce_fops_unl_ioctl(struct file *filep,
  64                                 unsigned int cmd, unsigned long arg)
  65{
  66        struct uacce_queue *q = filep->private_data;
  67        struct uacce_device *uacce = q->uacce;
  68
  69        switch (cmd) {
  70        case UACCE_CMD_START_Q:
  71                return uacce_start_queue(q);
  72
  73        case UACCE_CMD_PUT_Q:
  74                return uacce_put_queue(q);
  75
  76        default:
  77                if (!uacce->ops->ioctl)
  78                        return -EINVAL;
  79
  80                return uacce->ops->ioctl(q, cmd, arg);
  81        }
  82}
  83
  84#ifdef CONFIG_COMPAT
  85static long uacce_fops_compat_ioctl(struct file *filep,
  86                                   unsigned int cmd, unsigned long arg)
  87{
  88        arg = (unsigned long)compat_ptr(arg);
  89
  90        return uacce_fops_unl_ioctl(filep, cmd, arg);
  91}
  92#endif
  93
/*
 * uacce_bind_queue() - bind the current process's mm to the parent device.
 * @uacce: the uacce device
 * @q: the queue being opened
 *
 * For devices that negotiated UACCE_DEV_SVA, bind current->mm to the
 * parent device and record the SVA handle and PASID in the queue so the
 * hardware can operate on the caller's virtual addresses. A successful
 * no-op for non-SVA devices.
 */
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
        int pasid;
        struct iommu_sva *handle;

        if (!(uacce->flags & UACCE_DEV_SVA))
                return 0;

        handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        if (pasid == IOMMU_PASID_INVALID) {
                /* a bond without a usable PASID is of no use to us */
                iommu_sva_unbind_device(handle);
                return -ENODEV;
        }

        q->handle = handle;
        q->pasid = pasid;
        return 0;
}
 116
 117static void uacce_unbind_queue(struct uacce_queue *q)
 118{
 119        if (!q->handle)
 120                return;
 121        iommu_sva_unbind_device(q->handle);
 122        q->handle = NULL;
 123}
 124
 125static int uacce_fops_open(struct inode *inode, struct file *filep)
 126{
 127        struct uacce_device *uacce;
 128        struct uacce_queue *q;
 129        int ret = 0;
 130
 131        uacce = xa_load(&uacce_xa, iminor(inode));
 132        if (!uacce)
 133                return -ENODEV;
 134
 135        q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
 136        if (!q)
 137                return -ENOMEM;
 138
 139        ret = uacce_bind_queue(uacce, q);
 140        if (ret)
 141                goto out_with_mem;
 142
 143        q->uacce = uacce;
 144
 145        if (uacce->ops->get_queue) {
 146                ret = uacce->ops->get_queue(uacce, q->pasid, q);
 147                if (ret < 0)
 148                        goto out_with_bond;
 149        }
 150
 151        init_waitqueue_head(&q->wait);
 152        filep->private_data = q;
 153        uacce->inode = inode;
 154        q->state = UACCE_Q_INIT;
 155
 156        mutex_lock(&uacce->queues_lock);
 157        list_add(&q->list, &uacce->queues);
 158        mutex_unlock(&uacce->queues_lock);
 159
 160        return 0;
 161
 162out_with_bond:
 163        uacce_unbind_queue(q);
 164out_with_mem:
 165        kfree(q);
 166        return ret;
 167}
 168
 169static int uacce_fops_release(struct inode *inode, struct file *filep)
 170{
 171        struct uacce_queue *q = filep->private_data;
 172
 173        mutex_lock(&q->uacce->queues_lock);
 174        list_del(&q->list);
 175        mutex_unlock(&q->uacce->queues_lock);
 176        uacce_put_queue(q);
 177        uacce_unbind_queue(q);
 178        kfree(q);
 179
 180        return 0;
 181}
 182
 183static void uacce_vma_close(struct vm_area_struct *vma)
 184{
 185        struct uacce_queue *q = vma->vm_private_data;
 186        struct uacce_qfile_region *qfr = NULL;
 187
 188        if (vma->vm_pgoff < UACCE_MAX_REGION)
 189                qfr = q->qfrs[vma->vm_pgoff];
 190
 191        kfree(qfr);
 192}
 193
/* VM ops for queue mappings; .close frees the region bookkeeping. */
static const struct vm_operations_struct uacce_vm_ops = {
        .close = uacce_vma_close,
};
 197
 198static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 199{
 200        struct uacce_queue *q = filep->private_data;
 201        struct uacce_device *uacce = q->uacce;
 202        struct uacce_qfile_region *qfr;
 203        enum uacce_qfrt type = UACCE_MAX_REGION;
 204        int ret = 0;
 205
 206        if (vma->vm_pgoff < UACCE_MAX_REGION)
 207                type = vma->vm_pgoff;
 208        else
 209                return -EINVAL;
 210
 211        qfr = kzalloc(sizeof(*qfr), GFP_KERNEL);
 212        if (!qfr)
 213                return -ENOMEM;
 214
 215        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
 216        vma->vm_ops = &uacce_vm_ops;
 217        vma->vm_private_data = q;
 218        qfr->type = type;
 219
 220        mutex_lock(&uacce_mutex);
 221
 222        if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
 223                ret = -EINVAL;
 224                goto out_with_lock;
 225        }
 226
 227        if (q->qfrs[type]) {
 228                ret = -EEXIST;
 229                goto out_with_lock;
 230        }
 231
 232        switch (type) {
 233        case UACCE_QFRT_MMIO:
 234                if (!uacce->ops->mmap) {
 235                        ret = -EINVAL;
 236                        goto out_with_lock;
 237                }
 238
 239                ret = uacce->ops->mmap(q, vma, qfr);
 240                if (ret)
 241                        goto out_with_lock;
 242
 243                break;
 244
 245        case UACCE_QFRT_DUS:
 246                if (!uacce->ops->mmap) {
 247                        ret = -EINVAL;
 248                        goto out_with_lock;
 249                }
 250
 251                ret = uacce->ops->mmap(q, vma, qfr);
 252                if (ret)
 253                        goto out_with_lock;
 254                break;
 255
 256        default:
 257                ret = -EINVAL;
 258                goto out_with_lock;
 259        }
 260
 261        q->qfrs[type] = qfr;
 262        mutex_unlock(&uacce_mutex);
 263
 264        return ret;
 265
 266out_with_lock:
 267        mutex_unlock(&uacce_mutex);
 268        kfree(qfr);
 269        return ret;
 270}
 271
 272static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
 273{
 274        struct uacce_queue *q = file->private_data;
 275        struct uacce_device *uacce = q->uacce;
 276
 277        poll_wait(file, &q->wait, wait);
 278        if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
 279                return EPOLLIN | EPOLLRDNORM;
 280
 281        return 0;
 282}
 283
/* File operations backing each uacce character device node. */
static const struct file_operations uacce_fops = {
        .owner          = THIS_MODULE,
        .open           = uacce_fops_open,
        .release        = uacce_fops_release,
        .unlocked_ioctl = uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = uacce_fops_compat_ioctl,
#endif
        .mmap           = uacce_fops_mmap,
        .poll           = uacce_fops_poll,
};
 295
 296#define to_uacce_device(dev) container_of(dev, struct uacce_device, dev)
 297
 298static ssize_t api_show(struct device *dev,
 299                        struct device_attribute *attr, char *buf)
 300{
 301        struct uacce_device *uacce = to_uacce_device(dev);
 302
 303        return sprintf(buf, "%s\n", uacce->api_ver);
 304}
 305
 306static ssize_t flags_show(struct device *dev,
 307                          struct device_attribute *attr, char *buf)
 308{
 309        struct uacce_device *uacce = to_uacce_device(dev);
 310
 311        return sprintf(buf, "%u\n", uacce->flags);
 312}
 313
 314static ssize_t available_instances_show(struct device *dev,
 315                                        struct device_attribute *attr,
 316                                        char *buf)
 317{
 318        struct uacce_device *uacce = to_uacce_device(dev);
 319
 320        if (!uacce->ops->get_available_instances)
 321                return -ENODEV;
 322
 323        return sprintf(buf, "%d\n",
 324                       uacce->ops->get_available_instances(uacce));
 325}
 326
 327static ssize_t algorithms_show(struct device *dev,
 328                               struct device_attribute *attr, char *buf)
 329{
 330        struct uacce_device *uacce = to_uacce_device(dev);
 331
 332        return sprintf(buf, "%s\n", uacce->algs);
 333}
 334
 335static ssize_t region_mmio_size_show(struct device *dev,
 336                                     struct device_attribute *attr, char *buf)
 337{
 338        struct uacce_device *uacce = to_uacce_device(dev);
 339
 340        return sprintf(buf, "%lu\n",
 341                       uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
 342}
 343
 344static ssize_t region_dus_size_show(struct device *dev,
 345                                    struct device_attribute *attr, char *buf)
 346{
 347        struct uacce_device *uacce = to_uacce_device(dev);
 348
 349        return sprintf(buf, "%lu\n",
 350                       uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
 351}
 352
/* Read-only sysfs attributes exposed under each uacce device. */
static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
 359
/* Attribute list; per-device visibility is filtered below. */
static struct attribute *uacce_dev_attrs[] = {
        &dev_attr_api.attr,
        &dev_attr_flags.attr,
        &dev_attr_available_instances.attr,
        &dev_attr_algorithms.attr,
        &dev_attr_region_mmio_size.attr,
        &dev_attr_region_dus_size.attr,
        NULL,
};
 369
 370static umode_t uacce_dev_is_visible(struct kobject *kobj,
 371                                    struct attribute *attr, int n)
 372{
 373        struct device *dev = container_of(kobj, struct device, kobj);
 374        struct uacce_device *uacce = to_uacce_device(dev);
 375
 376        if (((attr == &dev_attr_region_mmio_size.attr) &&
 377            (!uacce->qf_pg_num[UACCE_QFRT_MMIO])) ||
 378            ((attr == &dev_attr_region_dus_size.attr) &&
 379            (!uacce->qf_pg_num[UACCE_QFRT_DUS])))
 380                return 0;
 381
 382        return attr->mode;
 383}
 384
/* Attribute group wired into the device via uacce_dev_groups. */
static struct attribute_group uacce_dev_group = {
        .is_visible     = uacce_dev_is_visible,
        .attrs          = uacce_dev_attrs,
};

/* Generates the uacce_dev_groups[] array used at device registration. */
__ATTRIBUTE_GROUPS(uacce_dev);
 391
/* Device release callback: frees the uacce when the last ref is dropped. */
static void uacce_release(struct device *dev)
{
        struct uacce_device *uacce = to_uacce_device(dev);

        kfree(uacce);
}
 398
 399/**
 400 * uacce_alloc() - alloc an accelerator
 401 * @parent: pointer of uacce parent device
 402 * @interface: pointer of uacce_interface for register
 403 *
 404 * Returns uacce pointer if success and ERR_PTR if not
 405 * Need check returned negotiated uacce->flags
 406 */
 407struct uacce_device *uacce_alloc(struct device *parent,
 408                                 struct uacce_interface *interface)
 409{
 410        unsigned int flags = interface->flags;
 411        struct uacce_device *uacce;
 412        int ret;
 413
 414        uacce = kzalloc(sizeof(struct uacce_device), GFP_KERNEL);
 415        if (!uacce)
 416                return ERR_PTR(-ENOMEM);
 417
 418        if (flags & UACCE_DEV_SVA) {
 419                ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
 420                if (ret)
 421                        flags &= ~UACCE_DEV_SVA;
 422        }
 423
 424        uacce->parent = parent;
 425        uacce->flags = flags;
 426        uacce->ops = interface->ops;
 427
 428        ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
 429                       GFP_KERNEL);
 430        if (ret < 0)
 431                goto err_with_uacce;
 432
 433        INIT_LIST_HEAD(&uacce->queues);
 434        mutex_init(&uacce->queues_lock);
 435        device_initialize(&uacce->dev);
 436        uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
 437        uacce->dev.class = uacce_class;
 438        uacce->dev.groups = uacce_dev_groups;
 439        uacce->dev.parent = uacce->parent;
 440        uacce->dev.release = uacce_release;
 441        dev_set_name(&uacce->dev, "%s-%d", interface->name, uacce->dev_id);
 442
 443        return uacce;
 444
 445err_with_uacce:
 446        if (flags & UACCE_DEV_SVA)
 447                iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
 448        kfree(uacce);
 449        return ERR_PTR(ret);
 450}
 451EXPORT_SYMBOL_GPL(uacce_alloc);
 452
/**
 * uacce_register() - add the accelerator to cdev and export to user space
 * @uacce: The initialized uacce device
 *
 * Return 0 if register succeeded, or an error.
 */
int uacce_register(struct uacce_device *uacce)
{
        if (!uacce)
                return -ENODEV;

        uacce->cdev = cdev_alloc();
        if (!uacce->cdev)
                return -ENOMEM;

        uacce->cdev->ops = &uacce_fops;
        uacce->cdev->owner = THIS_MODULE;

        /* the /dev node becomes visible to user space from this point on */
        return cdev_device_add(uacce->cdev, &uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_register);
 474
/**
 * uacce_remove() - remove the accelerator
 * @uacce: the accelerator to remove
 */
void uacce_remove(struct uacce_device *uacce)
{
        struct uacce_queue *q, *next_q;

        if (!uacce)
                return;
        /*
         * unmap remaining mapping from user space, preventing user still
         * access the mmaped area while parent device is already removed
         */
        if (uacce->inode)
                unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);

        /* ensure no open queue remains */
        mutex_lock(&uacce->queues_lock);
        list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
                uacce_put_queue(q);
                uacce_unbind_queue(q);
        }
        mutex_unlock(&uacce->queues_lock);

        /* disable sva now since no opened queues */
        if (uacce->flags & UACCE_DEV_SVA)
                iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);

        /* cdev may be absent if uacce_register() was never called */
        if (uacce->cdev)
                cdev_device_del(uacce->cdev, &uacce->dev);
        xa_erase(&uacce_xa, uacce->dev_id);
        /* drop the final ref; uacce_release() frees the structure */
        put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
 510
 511static int __init uacce_init(void)
 512{
 513        int ret;
 514
 515        uacce_class = class_create(THIS_MODULE, UACCE_NAME);
 516        if (IS_ERR(uacce_class))
 517                return PTR_ERR(uacce_class);
 518
 519        ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
 520        if (ret)
 521                class_destroy(uacce_class);
 522
 523        return ret;
 524}
 525
/* Module teardown: release the char-dev region, then the class. */
static __exit void uacce_exit(void)
{
        unregister_chrdev_region(uacce_devt, MINORMASK);
        class_destroy(uacce_class);
}
 531
/* Init at subsys level so uacce is ready before accelerator drivers. */
subsys_initcall(uacce_init);
module_exit(uacce_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
MODULE_DESCRIPTION("Accelerator interface for Userland applications");
 538