linux/drivers/dma-buf/udmabuf.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

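/*
 * CPU page fault handler for mmap'ed udmabufs: the backing pages were
 * already pinned at creation time, so just hand back the page for the
 * faulting offset with an extra reference.
 */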
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;

	vmf->page = ubuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

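/* Only shared mappings make sense for a buffer shared across devices. */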
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

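/*
 * Build a scatter/gather table covering all pinned pages and map it for
 * DMA on behalf of @dev.  On failure the partially built table is freed
 * and an ERR_PTR is returned.
 */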
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

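/* Undo get_sg_table(): unmap from the device and free the table. */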
static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	return put_sg_table(at->dev, sg, direction);
}

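/*
 * Final release: drop the sg table kept around for CPU-access syncs (if
 * any), release the reference taken on every page at creation time, and
 * free the bookkeeping structures.
 */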
static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

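/*
 * begin_cpu_access: map the buffer for the misc device on first use and
 * cache the result in ubuf->sg; on later calls only sync the cached
 * mapping so CPU reads see up-to-date data.
 */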
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			/*
			 * Don't cache the ERR_PTR: release_udmabuf() and
			 * end_cpu_udmabuf() test ubuf->sg for validity.
			 */
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

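/* end_cpu_access: flush CPU writes so they become visible to devices. */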
static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf       = map_udmabuf,
	.unmap_dma_buf     = unmap_udmabuf,
	.release           = release_udmabuf,
	.mmap              = mmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

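/*
 * The memfd must be sealed against shrinking (the pinned pages must not
 * go away underneath us) and must not be sealed against writing (the
 * buffer is exported read-write).
 */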
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

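/*
 * Core of both ioctls: validate each (memfd, offset, size) item, pin the
 * backing pages from shmem or hugetlbfs, and export the collection as a
 * dma-buf, returning its fd.
 */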
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page, *hpage = NULL;
	pgoff_t subpgoff, maxsubpgs;
	struct hstate *hpstate;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* Widen before multiplying so a large size_limit_mb can't overflow. */
	pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}
	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		mapping = file_inode(memfd)->i_mapping;
		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		if (is_file_hugepages(memfd)) {
			hpstate = hstate_file(memfd);
			pgoff = list[i].offset >> huge_page_shift(hpstate);
			subpgoff = (list[i].offset &
				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
		}
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			if (is_file_hugepages(memfd)) {
				if (!hpage) {
					hpage = find_get_page_flags(mapping, pgoff,
								    FGP_ACCESSED);
					if (!hpage) {
						ret = -EINVAL;
						goto err;
					}
				}
				page = hpage + subpgoff;
				get_page(page);
				subpgoff++;
				if (subpgoff == maxsubpgs) {
					put_page(hpage);
					hpage = NULL;
					subpgoff = 0;
					pgoff++;
				}
			} else {
				page = shmem_read_mapping_page(mapping,
							       pgoff + pgidx);
				if (IS_ERR(page)) {
					ret = PTR_ERR(page);
					goto err;
				}
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
		if (hpage) {
			put_page(hpage);
			hpage = NULL;
		}
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

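/* UDMABUF_CREATE: wrap the single-item ioctl in a one-entry list. */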
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

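/*
 * UDMABUF_CREATE_LIST: copy in the header, bounds-check the item count
 * against list_limit, then copy in the item array that follows it.
 */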
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");
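
For reference, a minimal userspace sketch of driving this interface, assuming glibc's memfd_create() and the uapi header <linux/udmabuf.h>; the file name, sizes, and error handling are illustrative only, not part of the driver:

/*
 * udmabuf-example.c — hypothetical standalone example, not part of the
 * driver.  Build with: gcc -D_GNU_SOURCE -o udmabuf-example udmabuf-example.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	const off_t size = 16 * 4096;	/* must be a page-size multiple */
	struct udmabuf_create create;
	int devfd, memfd, buffd;

	/* The backing file must be a memfd created with sealing allowed. */
	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	if (memfd < 0 || ftruncate(memfd, size) < 0)
		return 1;

	/* udmabuf requires F_SEAL_SHRINK and rejects F_SEAL_WRITE. */
	if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		return 1;

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;	/* page-aligned, checked by the driver */
	create.size   = size;	/* page-aligned, checked by the driver */

	/* On success the ioctl returns a new dma-buf fd. */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
	if (buffd < 0)
		return 1;

	printf("dma-buf fd %d backed by memfd %d\n", buffd, memfd);
	return 0;
}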