linux/drivers/xen/gntdev.c
/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include "gntdev-common.h"
#ifdef CONFIG_XEN_GNTDEV_DMABUF
#include "gntdev-dmabuf.h"
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
              "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
        "Maximum number of grants that may be mapped by one mapping request");

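/*
 * Example (a sketch, assuming the usual xen-gntdev module naming): when
 * built as a module the limit can be set at load time, e.g.
 * "modprobe xen-gntdev limit=1024", and the 0644 mode above typically
 * also exposes it writable at /sys/module/xen_gntdev/parameters/limit.
 */

/*
 * Set in gntdev_init(): true when the guest is not auto-translated
 * (classic PV). In that mode grants are mapped by rewriting the user
 * PTEs via the grant-map hypercall instead of mapping backing pages
 * directly.
 */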
static int use_ptemod;

static int unmap_grant_pages(struct gntdev_grant_map *map,
                             int offset, int pages);

static struct miscdevice gntdev_miscdev;

/* ------------------------------------------------------------------ */

bool gntdev_test_page_count(unsigned int count)
{
        return !count || count > limit;
}

static void gntdev_print_maps(struct gntdev_priv *priv,
                              char *text, int text_index)
{
#ifdef DEBUG
        struct gntdev_grant_map *map;

        pr_debug("%s: maps list (priv %p)\n", __func__, priv);
        list_for_each_entry(map, &priv->maps, next)
                pr_debug("  index %2d, count %2d %s\n",
                       map->index, map->count,
                       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct gntdev_grant_map *map)
{
        if (map == NULL)
                return;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        if (map->dma_vaddr) {
                struct gnttab_dma_alloc_args args;

                args.dev = map->dma_dev;
                args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
                args.nr_pages = map->count;
                args.pages = map->pages;
                args.frames = map->frames;
                args.vaddr = map->dma_vaddr;
                args.dev_bus_addr = map->dma_bus_addr;

                gnttab_dma_free_pages(&args);
        } else
#endif
        if (map->pages)
                gnttab_free_pages(map->count, map->pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        kvfree(map->frames);
#endif
        kvfree(map->pages);
        kvfree(map->grants);
        kvfree(map->map_ops);
        kvfree(map->unmap_ops);
        kvfree(map->kmap_ops);
        kvfree(map->kunmap_ops);
        kfree(map);
}

struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
                                          int dma_flags)
{
        struct gntdev_grant_map *add;
        int i;

        add = kzalloc(sizeof(*add), GFP_KERNEL);
        if (NULL == add)
                return NULL;

        add->grants    = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
        add->map_ops   = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
        add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
        add->kmap_ops  = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
        add->kunmap_ops = kvcalloc(count,
                                   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
        add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
            NULL == add->kmap_ops  ||
            NULL == add->kunmap_ops ||
            NULL == add->pages)
                goto err;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        add->dma_flags = dma_flags;

        /*
         * Check if this mapping is requested to be backed
         * by a DMA buffer.
         */
        if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
                struct gnttab_dma_alloc_args args;

                add->frames = kvcalloc(count, sizeof(add->frames[0]),
                                       GFP_KERNEL);
                if (!add->frames)
                        goto err;

                /* Remember the device, so we can free DMA memory. */
                add->dma_dev = priv->dma_dev;

                args.dev = priv->dma_dev;
                args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
                args.nr_pages = count;
                args.pages = add->pages;
                args.frames = add->frames;

                if (gnttab_dma_alloc_pages(&args))
                        goto err;

                add->dma_vaddr = args.vaddr;
                add->dma_bus_addr = args.dev_bus_addr;
        } else
#endif
        if (gnttab_alloc_pages(count, add->pages))
                goto err;

        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
                add->kmap_ops[i].handle = -1;
                add->kunmap_ops[i].handle = -1;
        }

        add->index = 0;
        add->count = count;
        refcount_set(&add->users, 1);

        return add;

err:
        gntdev_free_map(add);
        return NULL;
}

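/*
 * Insert the new map into the private list of maps, which is kept
 * sorted by index: the map is given the first index range that can
 * hold it, or is appended after the last map in the list.
 */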
void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
        struct gntdev_grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (add->index + add->count < map->index) {
                        list_add_tail(&add->next, &map->next);
                        goto done;
                }
                add->index = map->index + map->count;
        }
        list_add_tail(&add->next, &priv->maps);

done:
        gntdev_print_maps(priv, "[new]", add->index);
}

static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
                                                      int index, int count)
{
        struct gntdev_grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (map->index != index)
                        continue;
                if (count && map->count != count)
                        continue;
                return map;
        }
        return NULL;
}

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
        if (!map)
                return;

        if (!refcount_dec_and_test(&map->users))
                return;

        if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
                evtchn_put(map->notify.event);
        }

        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
        struct gntdev_grant_map *map = data;
        unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
        int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
        u64 pte_maddr;

        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;

        /*
         * Set the PTE as special to force get_user_pages_fast() to fall
         * back to the slow path.  If this is not supported as part of
         * the grant map, it will be done afterwards.
         */
        if (xen_feature(XENFEAT_gnttab_map_avail_bits))
                flags |= (1 << _GNTMAP_guest_avail0);

        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
        gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
                            -1 /* handle */);
        return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
        set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
        return 0;
}
#endif

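/*
 * Issue the batched grant-map operations for every page in @map via
 * gnttab_map_refs(), then record the returned handles in the matching
 * unmap ops. In the use_ptemod case the kernel-side kmap_ops are
 * passed along as well.
 */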
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
        int i, err = 0;

        if (!use_ptemod) {
                /* Note: it could already be mapped */
                if (map->map_ops[0].handle != -1)
                        return 0;
                for (i = 0; i < map->count; i++) {
                        unsigned long addr = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
                                map->grants[i].ref,
                                map->grants[i].domid);
                        gnttab_set_unmap_op(&map->unmap_ops[i], addr,
                                map->flags, -1 /* handle */);
                }
        } else {
                /*
                 * Set up the map_ops corresponding to the PTE entries pointing
                 * to the kernel linear addresses of the struct pages.
                 * These PTEs are completely different from the user PTEs dealt
                 * with by find_grant_ptes().
                 */
                for (i = 0; i < map->count; i++) {
                        unsigned long address = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        BUG_ON(PageHighMem(map->pages[i]));

                        gnttab_set_map_op(&map->kmap_ops[i], address,
                                map->flags | GNTMAP_host_map,
                                map->grants[i].ref,
                                map->grants[i].domid);
                        gnttab_set_unmap_op(&map->kunmap_ops[i], address,
                                map->flags | GNTMAP_host_map, -1);
                }
        }

        pr_debug("map %d+%d\n", map->index, map->count);
        err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
                        map->pages, map->count);
        if (err)
                return err;

        for (i = 0; i < map->count; i++) {
                if (map->map_ops[i].status) {
                        err = -EINVAL;
                        continue;
                }

                map->unmap_ops[i].handle = map->map_ops[i].handle;
                if (use_ptemod)
                        map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
                else if (map->dma_vaddr) {
                        unsigned long bfn;

                        bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
                        map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
                }
#endif
        }
        return err;
}

static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                               int pages)
{
        int i, err = 0;
        struct gntab_unmap_queue_data unmap_data;

        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
                if (pgno >= offset && pgno < offset + pages) {
                        /* No need for kmap, pages are in lowmem */
                        uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }

        unmap_data.unmap_ops = map->unmap_ops + offset;
        unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
        unmap_data.pages = map->pages + offset;
        unmap_data.count = pages;

        err = gnttab_unmap_refs_sync(&unmap_data);
        if (err)
                return err;

        for (i = 0; i < pages; i++) {
                if (map->unmap_ops[offset+i].status)
                        err = -EINVAL;
                pr_debug("unmap handle=%d st=%d\n",
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = -1;
        }
        return err;
}

static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                             int pages)
{
        int range, err = 0;

        pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

        /* It is possible the requested range will have a "hole" where we
         * already unmapped some of the grants. Only unmap valid ranges.
         */
        while (pages && !err) {
                while (pages && map->unmap_ops[offset].handle == -1) {
                        offset++;
                        pages--;
                }
                range = 0;
                while (range < pages) {
                        if (map->unmap_ops[offset+range].handle == -1)
                                break;
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }

        return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
        struct gntdev_grant_map *map = vma->vm_private_data;

        pr_debug("gntdev_vma_open %p\n", vma);
        refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
        struct gntdev_grant_map *map = vma->vm_private_data;
        struct file *file = vma->vm_file;
        struct gntdev_priv *priv = file->private_data;

        pr_debug("gntdev_vma_close %p\n", vma);
        if (use_ptemod) {
                WARN_ON(map->vma != vma);
                mmu_interval_notifier_remove(&map->notifier);
                map->vma = NULL;
        }
        vma->vm_private_data = NULL;
        gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
                                                 unsigned long addr)
{
        struct gntdev_grant_map *map = vma->vm_private_data;

        return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
        .find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */

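/*
 * mmu interval notifier callback: tear down the grant mappings that
 * fall inside the invalidated range, so the hypervisor's pointers to
 * the PTEs never go stale.
 */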
static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
{
        struct gntdev_grant_map *map =
                container_of(mn, struct gntdev_grant_map, notifier);
        unsigned long mstart, mend;
        int err;

        if (!mmu_notifier_range_blockable(range))
                return false;

        /*
         * If the VMA is split or otherwise changed the notifier is not
         * updated, but we don't want to process VA's outside the modified
         * VMA. FIXME: It would be much more understandable to just prevent
         * modifying the VMA in the first place.
         */
        if (map->vma->vm_start >= range->end ||
            map->vma->vm_end <= range->start)
                return true;

        mstart = max(range->start, map->vma->vm_start);
        mend = min(range->end, map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
                        map->index, map->count,
                        map->vma->vm_start, map->vma->vm_end,
                        range->start, range->end, mstart, mend);
        err = unmap_grant_pages(map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);

        return true;
}

static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
        .invalidate = gntdev_invalidate,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        INIT_LIST_HEAD(&priv->maps);
        mutex_init(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
        priv->dmabuf_priv = gntdev_dmabuf_init(flip);
        if (IS_ERR(priv->dmabuf_priv)) {
                int ret = PTR_ERR(priv->dmabuf_priv);

                kfree(priv);
                return ret;
        }
#endif

        flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        priv->dma_dev = gntdev_miscdev.this_device;
        dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
        pr_debug("priv %p\n", priv);

        return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv = flip->private_data;
        struct gntdev_grant_map *map;

        pr_debug("priv %p\n", priv);

        mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next,
                                 struct gntdev_grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        mutex_unlock(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
        gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif

        kfree(priv);
        return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                                       struct ioctl_gntdev_map_grant_ref __user *u)
{
        struct ioctl_gntdev_map_grant_ref op;
        struct gntdev_grant_map *map;
        int err;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, add %d\n", priv, op.count);
        if (unlikely(gntdev_test_page_count(op.count)))
                return -EINVAL;

        err = -ENOMEM;
        map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
        if (!map)
                return err;

        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
                gntdev_put_map(NULL, map);
                return -EFAULT;
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
        mutex_unlock(&priv->lock);

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;

        return 0;
}
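
/*
 * Userspace usage sketch (illustrative, not part of the driver): map a
 * single grant and mmap() it. "gref" and "remote_domid" are assumed to
 * come from the granting domain; error handling is omitted.
 *
 *        #include <fcntl.h>
 *        #include <sys/ioctl.h>
 *        #include <sys/mman.h>
 *        #include <xen/gntdev.h>
 *
 *        int fd = open("/dev/xen/gntdev", O_RDWR);
 *        struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
 *
 *        map.refs[0].domid = remote_domid;
 *        map.refs[0].ref = gref;
 *        ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);
 *        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, map.index);
 *        // ... use p, then munmap(p, 4096) and issue
 *        // IOCTL_GNTDEV_UNMAP_GRANT_REF with map.index to clean up.
 */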

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                                         struct ioctl_gntdev_unmap_grant_ref __user *u)
{
        struct ioctl_gntdev_unmap_grant_ref op;
        struct gntdev_grant_map *map;
        int err = -ENOENT;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
                err = 0;
        }
        mutex_unlock(&priv->lock);
        if (map)
                gntdev_put_map(priv, map);
        return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
                                              struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct gntdev_grant_map *map;
        int rv = -EINVAL;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

        mmap_read_lock(current->mm);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
                goto out_unlock;

        map = vma->vm_private_data;
        if (!map)
                goto out_unlock;

        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
        rv = 0;

 out_unlock:
        mmap_read_unlock(current->mm);

        if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
        return rv;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_unmap_notify op;
        struct gntdev_grant_map *map;
        int rc;
        int out_flags;
        evtchn_port_t out_event;

        if (copy_from_user(&op, u, sizeof(op)))
                return -EFAULT;

        if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
                return -EINVAL;

        /* We need to grab a reference to the event channel we are going to use
         * to send the notify before releasing the reference we may already
         * have (if someone has called this ioctl twice). This is required so
         * that it is possible to change the clear_byte part of the
         * notification without disturbing the event channel part; the
         * reference being dropped may be the last one to that event channel.
         */
        if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
                if (evtchn_get(op.event_channel_port))
                        return -EINVAL;
        }

        out_flags = op.action;
        out_event = op.event_channel_port;

        mutex_lock(&priv->lock);

        list_for_each_entry(map, &priv->maps, next) {
                uint64_t begin = map->index << PAGE_SHIFT;
                uint64_t end = (map->index + map->count) << PAGE_SHIFT;
                if (op.index >= begin && op.index < end)
                        goto found;
        }
        rc = -ENOENT;
        goto unlock_out;

 found:
        if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
                        (map->flags & GNTMAP_readonly)) {
                rc = -EINVAL;
                goto unlock_out;
        }

        out_flags = map->notify.flags;
        out_event = map->notify.event;

        map->notify.flags = op.action;
        map->notify.addr = op.index - (map->index << PAGE_SHIFT);
        map->notify.event = op.event_channel_port;

        rc = 0;

 unlock_out:
        mutex_unlock(&priv->lock);

        /* Drop the reference to the event channel we did not save in the map */
        if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
                evtchn_put(out_event);

        return rc;
}
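
/*
 * Usage sketch (illustrative; "fd", "map" and "port" are assumed from
 * the mapping example above): request that the last byte of the first
 * mapped page be cleared and an event sent when the map is torn down.
 *
 *        struct ioctl_gntdev_unmap_notify notify = {
 *                .index = map.index + 4095,
 *                .action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *                .event_channel_port = port,
 *        };
 *
 *        ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */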

#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
        struct gnttab_copy ops[GNTDEV_COPY_BATCH];
        struct page *pages[GNTDEV_COPY_BATCH];
        s16 __user *status[GNTDEV_COPY_BATCH];
        unsigned int nr_ops;
        unsigned int nr_pages;
};

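/*
 * Pin a single user page for a copy op and return the gfn backing the
 * byte at @virt. The page is recorded in the batch so it can be
 * released once the batch has been processed.
 */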
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
                           bool writeable, unsigned long *gfn)
{
        unsigned long addr = (unsigned long)virt;
        struct page *page;
        unsigned long xen_pfn;
        int ret;

        ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
        if (ret < 0)
                return ret;

        batch->pages[batch->nr_pages++] = page;

        xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
        *gfn = pfn_to_gfn(xen_pfn);

        return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        for (i = 0; i < batch->nr_pages; i++)
                put_page(batch->pages[i]);
        batch->nr_pages = 0;
}

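/*
 * Submit the batched copy ops to the hypervisor, release the pinned
 * pages, and write back the first failing status of each segment to
 * its user-space status slot.
 */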
static int gntdev_copy(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        gnttab_batch_copy(batch->ops, batch->nr_ops);
        gntdev_put_pages(batch);

        /*
         * For each completed op, update the status if the op failed
         * and all previous ops for the segment were successful.
         */
        for (i = 0; i < batch->nr_ops; i++) {
                s16 status = batch->ops[i].status;
                s16 old_status;

                if (status == GNTST_okay)
                        continue;

                if (__get_user(old_status, batch->status[i]))
                        return -EFAULT;

                if (old_status != GNTST_okay)
                        continue;

                if (__put_user(status, batch->status[i]))
                        return -EFAULT;
        }

        batch->nr_ops = 0;
        return 0;
}

static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
                                 struct gntdev_grant_copy_segment *seg,
                                 s16 __user *status)
{
        uint16_t copied = 0;

        /*
         * Disallow local -> local copies, since there is only space in
         * batch->pages for one page per op and this would be a very
         * expensive memcpy().
         */
        if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
                return -EINVAL;

        /* Can't cross page if source/dest is a grant ref. */
        if (seg->flags & GNTCOPY_source_gref) {
                if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }
        if (seg->flags & GNTCOPY_dest_gref) {
                if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }

        if (put_user(GNTST_okay, status))
                return -EFAULT;

        while (copied < seg->len) {
                struct gnttab_copy *op;
                void __user *virt;
                size_t len, off;
                unsigned long gfn;
                int ret;

                if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
                        ret = gntdev_copy(batch);
                        if (ret < 0)
                                return ret;
                }

                len = seg->len - copied;

                op = &batch->ops[batch->nr_ops];
                op->flags = 0;

                if (seg->flags & GNTCOPY_source_gref) {
                        op->source.u.ref = seg->source.foreign.ref;
                        op->source.domid = seg->source.foreign.domid;
                        op->source.offset = seg->source.foreign.offset + copied;
                        op->flags |= GNTCOPY_source_gref;
                } else {
                        virt = seg->source.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, false, &gfn);
                        if (ret < 0)
                                return ret;

                        op->source.u.gmfn = gfn;
                        op->source.domid = DOMID_SELF;
                        op->source.offset = off;
                }

                if (seg->flags & GNTCOPY_dest_gref) {
                        op->dest.u.ref = seg->dest.foreign.ref;
                        op->dest.domid = seg->dest.foreign.domid;
                        op->dest.offset = seg->dest.foreign.offset + copied;
                        op->flags |= GNTCOPY_dest_gref;
                } else {
                        virt = seg->dest.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, true, &gfn);
                        if (ret < 0)
                                return ret;

                        op->dest.u.gmfn = gfn;
                        op->dest.domid = DOMID_SELF;
                        op->dest.offset = off;
                }

                op->len = len;
                copied += len;

                batch->status[batch->nr_ops] = status;
                batch->nr_ops++;
        }

        return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_grant_copy copy;
        struct gntdev_copy_batch batch;
        unsigned int i;
        int ret = 0;

        if (copy_from_user(&copy, u, sizeof(copy)))
                return -EFAULT;

        batch.nr_ops = 0;
        batch.nr_pages = 0;

        for (i = 0; i < copy.count; i++) {
                struct gntdev_grant_copy_segment seg;

                if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
                        ret = -EFAULT;
                        goto out;
                }

                ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
                if (ret < 0)
                        goto out;

                cond_resched();
        }
        if (batch.nr_ops)
                ret = gntdev_copy(&batch);
        return ret;

 out:
        gntdev_put_pages(&batch);
        return ret;
}
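
/*
 * Usage sketch (illustrative; "fd", "buf", "gref" and "remote_domid"
 * are assumptions, and 4096 stands in for XEN_PAGE_SIZE): copy one page
 * from a local buffer into a page granted by another domain.
 *
 *        struct gntdev_grant_copy_segment seg = {
 *                .flags = GNTCOPY_dest_gref,
 *                .len = 4096,
 *        };
 *
 *        seg.source.virt = buf;
 *        seg.dest.foreign.ref = gref;
 *        seg.dest.foreign.domid = remote_domid;
 *        seg.dest.foreign.offset = 0;
 *
 *        struct ioctl_gntdev_grant_copy copy = {
 *                .count = 1,
 *                .segments = &seg,
 *        };
 *
 *        ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *        // On success seg.status is GNTST_okay.
 */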

static long gntdev_ioctl(struct file *flip,
                         unsigned int cmd, unsigned long arg)
{
        struct gntdev_priv *priv = flip->private_data;
        void __user *ptr = (void __user *)arg;

        switch (cmd) {
        case IOCTL_GNTDEV_MAP_GRANT_REF:
                return gntdev_ioctl_map_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_UNMAP_GRANT_REF:
                return gntdev_ioctl_unmap_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
                return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

        case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
                return gntdev_ioctl_notify(priv, ptr);

        case IOCTL_GNTDEV_GRANT_COPY:
                return gntdev_ioctl_grant_copy(priv, ptr);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
        case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
                return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);

        case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
                return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);

        case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
                return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);

        case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
                return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
#endif

        default:
                pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
                return -ENOIOCTLCMD;
        }

        return 0;
}

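/*
 * mmap() handler: look up the grant map named by the pgoff-encoded
 * index, then either insert the granted pages into the VMA directly
 * (auto-translated guests) or rewrite the VMA's PTEs through the
 * grant-map hypercall (use_ptemod).
 */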
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
        struct gntdev_priv *priv = flip->private_data;
        int index = vma->vm_pgoff;
        int count = vma_pages(vma);
        struct gntdev_grant_map *map;
        int err = -EINVAL;

        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        pr_debug("map %d+%d at %lx (pgoff %lx)\n",
                        index, count, vma->vm_start, vma->vm_pgoff);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
        if (use_ptemod && map->vma)
                goto unlock_out;
        refcount_inc(&map->users);

        vma->vm_ops = &gntdev_vmops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;

        vma->vm_private_data = map;
        if (map->flags) {
                if ((vma->vm_flags & VM_WRITE) &&
                                (map->flags & GNTMAP_readonly))
                        goto out_unlock_put;
        } else {
                map->flags = GNTMAP_host_map;
                if (!(vma->vm_flags & VM_WRITE))
                        map->flags |= GNTMAP_readonly;
        }

        if (use_ptemod) {
                map->vma = vma;
                err = mmu_interval_notifier_insert_locked(
                        &map->notifier, vma->vm_mm, vma->vm_start,
                        vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
                if (err)
                        goto out_unlock_put;
        }
        mutex_unlock(&priv->lock);

        if (use_ptemod) {
                /*
                 * gntdev takes the address of the PTE in find_grant_ptes() and
                 * passes it to the hypervisor in gntdev_map_grant_pages(). The
                 * purpose of the notifier is to prevent the hypervisor pointer
                 * to the PTE from going stale.
                 *
                 * Since this vma's mappings can't be touched without the
                 * mmap_lock, and we are holding it now, there is no need for
                 * the notifier_range locking pattern.
                 */
                mmu_interval_read_begin(&map->notifier);

                map->pages_vm_start = vma->vm_start;
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
                                          find_grant_ptes, map);
                if (err) {
                        pr_warn("find_grant_ptes() failure.\n");
                        goto out_put_map;
                }
        }

        err = gntdev_map_grant_pages(map);
        if (err)
                goto out_put_map;

        if (!use_ptemod) {
                err = vm_map_pages_zero(vma, map->pages, map->count);
                if (err)
                        goto out_put_map;
        } else {
#ifdef CONFIG_X86
                /*
                 * If the PTEs were not made special by the grant map
                 * hypercall, do so here.
                 *
                 * This is racy since the mapping is already visible
                 * to userspace but userspace should be well-behaved
                 * enough to not touch it until the mmap() call
                 * returns.
                 */
                if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
                        apply_to_page_range(vma->vm_mm, vma->vm_start,
                                            vma->vm_end - vma->vm_start,
                                            set_grant_ptes_as_special, NULL);
                }
#endif
        }

        return 0;

unlock_out:
        mutex_unlock(&priv->lock);
        return err;

out_unlock_put:
        mutex_unlock(&priv->lock);
out_put_map:
        if (use_ptemod) {
                unmap_grant_pages(map, 0, map->count);
                if (map->vma) {
                        mmu_interval_notifier_remove(&map->notifier);
                        map->vma = NULL;
                }
        }
        gntdev_put_map(priv, map);
        return err;
}

static const struct file_operations gntdev_fops = {
        .owner = THIS_MODULE,
        .open = gntdev_open,
        .release = gntdev_release,
        .mmap = gntdev_mmap,
        .unlocked_ioctl = gntdev_ioctl
};

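/*
 * Registered as a dynamic misc device; with the usual udev rules the
 * device node appears as /dev/xen/gntdev.
 */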
static struct miscdevice gntdev_miscdev = {
        .minor        = MISC_DYNAMIC_MINOR,
        .name         = "xen/gntdev",
        .fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

        err = misc_register(&gntdev_miscdev);
        if (err != 0) {
                pr_err("Could not register gntdev device\n");
                return err;
        }
        return 0;
}

static void __exit gntdev_exit(void)
{
        misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */