linux/drivers/xen/gntdev-dmabuf.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF       0
#endif

struct gntdev_dmabuf {
        struct gntdev_dmabuf_priv *priv;
        struct dma_buf *dmabuf;
        struct list_head next;
        int fd;

        union {
                struct {
                        /* Exported buffers are reference counted. */
                        struct kref refcount;

                        struct gntdev_priv *priv;
                        struct gntdev_grant_map *map;
                } exp;
                struct {
                        /* Granted references of the imported buffer. */
                        grant_ref_t *refs;
                        /* Scatter-gather table of the imported buffer. */
                        struct sg_table *sgt;
                        /* dma-buf attachment of the imported buffer. */
                        struct dma_buf_attachment *attach;
                } imp;
        } u;

        /* Number of pages this buffer has. */
        int nr_pages;
        /* Pages of this buffer. */
        struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
        struct list_head next;
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct completion completion;
};

struct gntdev_dmabuf_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
        /* List of exported DMA buffers. */
        struct list_head exp_list;
        /* List of wait objects. */
        struct list_head exp_wait_list;
        /* List of imported DMA buffers. */
        struct list_head imp_list;
        /* This is the lock which protects the dma_buf_xxx lists above. */
        struct mutex lock;
        /*
         * We reference this file while exporting dma-bufs, so
         * the grant device context is not destroyed while there are
         * external users alive.
         */
        struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);

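/*
 * Allocate a wait object for the exported buffer and put it on the wait
 * list.  The reference taken by the caller is dropped here, so that the
 * buffer's release callback can run and complete the wait object.
 */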
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
                        struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        init_completion(&obj->completion);
        obj->gntdev_dmabuf = gntdev_dmabuf;

        mutex_lock(&priv->lock);
        list_add(&obj->next, &priv->exp_wait_list);
        /* Put our reference and wait for gntdev_dmabuf's release to fire. */
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
        return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
                                     struct gntdev_dmabuf_wait_obj *obj)
{
        mutex_lock(&priv->lock);
        list_del(&obj->next);
        mutex_unlock(&priv->lock);
        kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
                                    u32 wait_to_ms)
{
        if (wait_for_completion_timeout(&obj->completion,
                        msecs_to_jiffies(wait_to_ms)) <= 0)
                return -ETIMEDOUT;

        return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
                                       struct gntdev_dmabuf *gntdev_dmabuf)
{
        struct gntdev_dmabuf_wait_obj *obj;

        list_for_each_entry(obj, &priv->exp_wait_list, next)
                if (obj->gntdev_dmabuf == gntdev_dmabuf) {
                        pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
                        complete_all(&obj->completion);
                        break;
                }
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the export list\n");
                        kref_get(&gntdev_dmabuf->u.exp.refcount);
                        ret = gntdev_dmabuf;
                        break;
                }
        mutex_unlock(&priv->lock);
        return ret;
}

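/*
 * Wait for the exported buffer with the given fd to be released, or time
 * out after wait_to_ms milliseconds.  Returns -ENOENT if the buffer is
 * not found, i.e. it has already been released or the fd is unknown.
 */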
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
                                    int wait_to_ms)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct gntdev_dmabuf_wait_obj *obj;
        int ret;

        pr_debug("Will wait for dma-buf with fd %d\n", fd);
        /*
         * Try to find the DMA buffer: if it is not found then either the
         * buffer has already been released or the provided file descriptor
         * is wrong.
         */
        gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        /*
         * gntdev_dmabuf still exists and we now hold a reference to it,
         * so prepare to wait: allocate a wait object and add it to the
         * wait list, so we can find it on release.
         */
        obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
        dmabuf_exp_wait_obj_free(priv, obj);
        return ret;
}

/* DMA buffer export support. */

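/*
 * Build a scatter-gather table describing the buffer's pages.  The table
 * is mapped for the importing device in dmabuf_exp_ops_map_dma_buf().
 */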
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sgt;
        int ret;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                        nr_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret)
                goto out;

        return sgt;

out:
        kfree(sgt);
        return ERR_PTR(ret);
}

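/*
 * Exporter attach/detach callbacks: each attachment carries its own
 * gntdev_dmabuf_attachment which caches the mapped sg table and the
 * mapping direction until the importer detaches.
 */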
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
                                 struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

        gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf_attach)
                return -ENOMEM;

        gntdev_dmabuf_attach->dir = DMA_NONE;
        attach->priv = gntdev_dmabuf_attach;
        return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

        if (gntdev_dmabuf_attach) {
                struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

                if (sgt) {
                        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                                dma_unmap_sg_attrs(attach->dev, sgt->sgl,
                                                   sgt->nents,
                                                   gntdev_dmabuf_attach->dir,
                                                   DMA_ATTR_SKIP_CPU_SYNC);
                        sg_free_table(sgt);
                }

                kfree(sgt);
                kfree(gntdev_dmabuf_attach);
                attach->priv = NULL;
        }
}

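/*
 * Map the exported pages for the attached device.  A mapping is cached
 * per attachment: mapping again with the same direction returns the
 * cached sg table, while mapping with a different direction fails with
 * -EBUSY.  The actual unmap happens on detach.
 */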
static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir)
{
        struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
        struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
        struct sg_table *sgt;

        pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
                 attach->dev);

        if (dir == DMA_NONE || !gntdev_dmabuf_attach)
                return ERR_PTR(-EINVAL);

        /* Return the cached mapping when possible. */
        if (gntdev_dmabuf_attach->dir == dir)
                return gntdev_dmabuf_attach->sgt;

        /*
         * Two mappings with different directions for the same attachment are
         * not allowed.
         */
        if (gntdev_dmabuf_attach->dir != DMA_NONE)
                return ERR_PTR(-EBUSY);

        sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
                                  gntdev_dmabuf->nr_pages);
        if (!IS_ERR(sgt)) {
                if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
                                      DMA_ATTR_SKIP_CPU_SYNC)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        gntdev_dmabuf_attach->sgt = sgt;
                        gntdev_dmabuf_attach->dir = dir;
                }
        }
        if (IS_ERR(sgt))
                pr_debug("Failed to map sg table for dev %p\n", attach->dev);
        return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        /* Not implemented. The unmap is done in dmabuf_exp_ops_detach(). */
}

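/*
 * Final kref release of an exported buffer: wake up any waiters, unlink
 * the buffer from the export list and drop the reference to the gntdev
 * file taken at export time.  Called with the dma-buf private lock held.
 */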
static void dmabuf_exp_release(struct kref *kref)
{
        struct gntdev_dmabuf *gntdev_dmabuf =
                container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

        dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
        list_del(&gntdev_dmabuf->next);
        fput(gntdev_dmabuf->priv->filp);
        kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
                                  struct gntdev_grant_map *map)
{
        mutex_lock(&priv->lock);
        list_del(&map->next);
        gntdev_put_map(NULL /* already removed */, map);
        mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
        struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
        struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

        dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
                              gntdev_dmabuf->u.exp.map);
        mutex_lock(&priv->lock);
        kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
        mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
        .attach = dmabuf_exp_ops_attach,
        .detach = dmabuf_exp_ops_detach,
        .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
        .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
        .release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
        struct gntdev_priv *priv;
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_priv *dmabuf_priv;
        struct device *dev;
        int count;
        struct page **pages;
        u32 fd;
};

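/*
 * Export the grant map's pages as a new dma-buf, install a file
 * descriptor for it and add the buffer to the export list.  A reference
 * to the gntdev file is taken so the device context outlives external
 * users of the exported buffer.
 */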
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct gntdev_dmabuf *gntdev_dmabuf;
        int ret;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                return -ENOMEM;

        kref_init(&gntdev_dmabuf->u.exp.refcount);

        gntdev_dmabuf->priv = args->dmabuf_priv;
        gntdev_dmabuf->nr_pages = args->count;
        gntdev_dmabuf->pages = args->pages;
        gntdev_dmabuf->u.exp.priv = args->priv;
        gntdev_dmabuf->u.exp.map = args->map;

        exp_info.exp_name = KBUILD_MODNAME;
        if (args->dev->driver && args->dev->driver->owner)
                exp_info.owner = args->dev->driver->owner;
        else
                exp_info.owner = THIS_MODULE;
        exp_info.ops = &dmabuf_exp_ops;
        exp_info.size = args->count << PAGE_SHIFT;
        exp_info.flags = O_RDWR;
        exp_info.priv = gntdev_dmabuf;

        gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(gntdev_dmabuf->dmabuf)) {
                ret = PTR_ERR(gntdev_dmabuf->dmabuf);
                gntdev_dmabuf->dmabuf = NULL;
                goto fail;
        }

        ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
        if (ret < 0)
                goto fail;

        gntdev_dmabuf->fd = ret;
        args->fd = ret;

        pr_debug("Exporting DMA buffer with fd %d\n", ret);

        mutex_lock(&args->dmabuf_priv->lock);
        list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
        mutex_unlock(&args->dmabuf_priv->lock);
        get_file(gntdev_dmabuf->priv->filp);
        return 0;

fail:
        if (gntdev_dmabuf->dmabuf)
                dma_buf_put(gntdev_dmabuf->dmabuf);
        kfree(gntdev_dmabuf);
        return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
                                 int count)
{
        struct gntdev_grant_map *map;

        if (unlikely(gntdev_test_page_count(count)))
                return ERR_PTR(-EINVAL);

        if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
            (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
                pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
                return ERR_PTR(-EINVAL);
        }

        map = gntdev_alloc_map(priv, count, dmabuf_flags);
        if (!map)
                return ERR_PTR(-ENOMEM);

        return map;
}

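/*
 * Back end of the export ioctl: allocate backing storage, map the
 * foreign grant references into it and export the mapped pages as a
 * dma-buf file descriptor.
 */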
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
                                int count, u32 domid, u32 *refs, u32 *fd)
{
        struct gntdev_grant_map *map;
        struct gntdev_dmabuf_export_args args;
        int i, ret;

        map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
        if (IS_ERR(map))
                return PTR_ERR(map);

        for (i = 0; i < count; i++) {
                map->grants[i].domid = domid;
                map->grants[i].ref = refs[i];
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        mutex_unlock(&priv->lock);

        map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
        map->flags |= GNTMAP_device_map;
#endif

        ret = gntdev_map_grant_pages(map);
        if (ret < 0)
                goto out;

        args.priv = priv;
        args.map = map;
        args.dev = priv->dma_dev;
        args.dmabuf_priv = priv->dmabuf_priv;
        args.count = map->count;
        args.pages = map->pages;
        args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

        ret = dmabuf_exp_from_pages(&args);
        if (ret < 0)
                goto out;

        *fd = args.fd;
        return 0;

out:
        dmabuf_exp_remove_map(priv, map);
        return ret;
}

/* DMA buffer import support. */

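/*
 * Grant the foreign domain @domid access to each page of the imported
 * buffer, storing the resulting grant references in @refs.
 */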
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
                                int count, int domid)
{
        grant_ref_t priv_gref_head;
        int i, ret;

        ret = gnttab_alloc_grant_references(count, &priv_gref_head);
        if (ret < 0) {
                pr_debug("Cannot allocate grant references, ret %d\n", ret);
                return ret;
        }

        for (i = 0; i < count; i++) {
                int cur_ref;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0) {
                        ret = cur_ref;
                        pr_debug("Cannot claim grant reference, ret %d\n", ret);
                        goto out;
                }

                gnttab_grant_foreign_access_ref(cur_ref, domid,
                                                xen_page_to_gfn(pages[i]), 0);
                refs[i] = cur_ref;
        }

        return 0;

out:
        gnttab_free_grant_references(priv_gref_head);
        return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (refs[i] != GRANT_INVALID_REF)
                        gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
        kfree(gntdev_dmabuf->pages);
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        int i;

        gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
        if (!gntdev_dmabuf)
                goto fail_no_free;

        gntdev_dmabuf->u.imp.refs = kcalloc(count,
                                            sizeof(gntdev_dmabuf->u.imp.refs[0]),
                                            GFP_KERNEL);
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;

        gntdev_dmabuf->pages = kcalloc(count,
                                       sizeof(gntdev_dmabuf->pages[0]),
                                       GFP_KERNEL);
        if (!gntdev_dmabuf->pages)
                goto fail;

        gntdev_dmabuf->nr_pages = count;

        for (i = 0; i < count; i++)
                gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

        return gntdev_dmabuf;

fail:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
        return ERR_PTR(-ENOMEM);
}

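/*
 * Back end of the import ioctl: attach to and map the dma-buf given by
 * @fd, check that it is backed by exactly @count system pages, grant
 * @domid access to those pages and add the buffer to the import list.
 */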
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
                   int fd, int count, int domid)
{
        struct gntdev_dmabuf *gntdev_dmabuf, *ret;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct sg_page_iter sg_iter;
        int i;

        dma_buf = dma_buf_get(fd);
        if (IS_ERR(dma_buf))
                return ERR_CAST(dma_buf);

        gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
        if (IS_ERR(gntdev_dmabuf)) {
                ret = gntdev_dmabuf;
                goto fail_put;
        }

        gntdev_dmabuf->priv = priv;
        gntdev_dmabuf->fd = fd;

        attach = dma_buf_attach(dma_buf, dev);
        if (IS_ERR(attach)) {
                ret = ERR_CAST(attach);
                goto fail_free_obj;
        }

        gntdev_dmabuf->u.imp.attach = attach;

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = ERR_CAST(sgt);
                goto fail_detach;
        }

        /* Check the number of pages that the imported buffer has. */
        if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
                ret = ERR_PTR(-EINVAL);
                pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
                         attach->dmabuf->size, gntdev_dmabuf->nr_pages);
                goto fail_unmap;
        }

        gntdev_dmabuf->u.imp.sgt = sgt;

        /* Now convert sgt to array of pages and check for page validity. */
        i = 0;
        for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                /*
                 * Check if page is valid: this can fail if we are given
                 * a page from VRAM or other resources which are not backed
                 * by a struct page.
                 */
                if (!pfn_valid(page_to_pfn(page))) {
                        ret = ERR_PTR(-EINVAL);
                        goto fail_unmap;
                }

                gntdev_dmabuf->pages[i++] = page;
        }

        ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
        if (IS_ERR(ret))
                goto fail_end_access;

        pr_debug("Imported DMA buffer with fd %d\n", fd);

        mutex_lock(&priv->lock);
        list_add(&gntdev_dmabuf->next, &priv->imp_list);
        mutex_unlock(&priv->lock);

        return gntdev_dmabuf;

fail_end_access:
        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
fail_free_obj:
        dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
        dma_buf_put(dma_buf);
        return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove
 * it from the import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

        mutex_lock(&priv->lock);
        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
                if (gntdev_dmabuf->fd == fd) {
                        pr_debug("Found gntdev_dmabuf in the import list\n");
                        ret = gntdev_dmabuf;
                        list_del(&gntdev_dmabuf->next);
                        break;
                }
        }
        mutex_unlock(&priv->lock);
        return ret;
}

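/*
 * Tear down an imported buffer: end foreign access to its pages, unmap
 * and detach the dma-buf, drop our reference to it and free the
 * bookkeeping storage.
 */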
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
        struct gntdev_dmabuf *gntdev_dmabuf;
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        pr_debug("Releasing DMA buffer with fd %d\n", fd);

        dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
                                      gntdev_dmabuf->nr_pages);

        attach = gntdev_dmabuf->u.imp.attach;

        if (gntdev_dmabuf->u.imp.sgt)
                dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
                                         DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        dma_buf_put(dma_buf);

        dmabuf_imp_free_storage(gntdev_dmabuf);
        return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
        struct gntdev_dmabuf *q, *gntdev_dmabuf;

        list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
                dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */

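/*
 * Handle IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS: export the pages backing the
 * caller-provided grant references as a dma-buf and return its fd.
 * Only available when the device is not using page-table based mapping
 * (use_ptemod == 0).
 *
 * Roughly, user space drives this as in the minimal sketch below
 * (error handling omitted; assumes the uapi definitions from
 * <xen/gntdev.h> and gntdev_fd, count, domid, refs[] provided by the
 * caller):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op =
 *		malloc(sizeof(*op) + count * sizeof(op->refs[0]));
 *
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
 *		dmabuf_fd = op->fd;	/- fd usable by any dma-buf importer -/
 */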
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
                                       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_from_refs op;
        u32 *refs;
        long ret;

        if (use_ptemod) {
                pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
                         use_ptemod);
                return -EINVAL;
        }

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
        if (!refs)
                return -ENOMEM;

        if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out;
        }

        ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
                                   op.domid, refs, &op.fd);
        if (ret)
                goto out;

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                ret = -EFAULT;

out:
        kfree(refs);
        return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
                                           struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
        struct ioctl_gntdev_dmabuf_exp_wait_released op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
                                        op.wait_to_ms);
}

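/*
 * Handle IOCTL_GNTDEV_DMABUF_IMP_TO_REFS: import the dma-buf passed by
 * user space and copy the resulting grant references back to the
 * caller.  The import is rolled back if the copy-out fails.
 */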
long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_to_refs op;
        struct gntdev_dmabuf *gntdev_dmabuf;
        long ret;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
                                           priv->dma_dev, op.fd,
                                           op.count, op.domid);
        if (IS_ERR(gntdev_dmabuf))
                return PTR_ERR(gntdev_dmabuf);

        if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
                         sizeof(*u->refs) * op.count) != 0) {
                ret = -EFAULT;
                goto out_release;
        }
        return 0;

out_release:
        dmabuf_imp_release(priv->dmabuf_priv, op.fd);
        return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
                                     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
        struct ioctl_gntdev_dmabuf_imp_release op;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;

        return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

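/*
 * Allocate the per-file dma-buf context.  @filp is the gntdev file
 * itself; a reference to it is taken for every exported buffer so the
 * context stays alive while external users hold our dma-bufs.
 */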
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
        struct gntdev_dmabuf_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        mutex_init(&priv->lock);
        INIT_LIST_HEAD(&priv->exp_list);
        INIT_LIST_HEAD(&priv->exp_wait_list);
        INIT_LIST_HEAD(&priv->imp_list);

        priv->filp = filp;

        return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
        dmabuf_imp_release_all(priv);
        kfree(priv);
}