linux/drivers/xen/gntdev.c
/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
              "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

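/*
 * Typical userspace flow, as a sketch only (field names as in the uapi
 * header xen/gntdev.h; remote_domid and gref stand in for values the
 * application obtains out of band):
 *
 *      struct ioctl_gntdev_map_grant_ref op = {
 *              .count = 1,
 *              .refs  = {{ .domid = remote_domid, .ref = gref }},
 *      };
 *      ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *      ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, op.index);
 *
 * i.e. the ioctl reserves a range of file offsets and the mmap() of
 * that range performs the actual grant mapping.
 */
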
static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
                "the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;
#define populate_freeable_maps use_ptemod

struct gntdev_priv {
        /* maps with visible offsets in the file descriptor */
        struct list_head maps;
        /* maps that are not visible; will be freed on munmap.
         * Only populated if populate_freeable_maps == 1 */
        struct list_head freeable_maps;
        /* lock protects maps and freeable_maps */
        struct mutex lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
};

struct unmap_notify {
        int flags;
        /* Address relative to the start of the grant_map */
        int addr;
        int event;
};

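/*
 * One grant_map tracks a contiguous run of granted pages: the grant
 * references supplied by userspace, the pre-built grant-table map/unmap
 * operations for the user and kernel mappings, and the backing struct
 * pages.  A map occupies @count pages starting at @index within the
 * file offset space.
 */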
struct grant_map {
        struct list_head next;
        struct vm_area_struct *vma;
        int index;
        int count;
        int flags;
        refcount_t users;
        struct unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref   *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref   *kmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned long pages_vm_start;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
                              char *text, int text_index)
{
#ifdef DEBUG
        struct grant_map *map;

        pr_debug("%s: maps list (priv %p)\n", __func__, priv);
        list_for_each_entry(map, &priv->maps, next)
                pr_debug("  index %2d, count %2d %s\n",
                       map->index, map->count,
                       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct grant_map *map)
{
        if (map == NULL)
                return;

        if (map->pages)
                gnttab_free_pages(map->count, map->pages);
        kfree(map->pages);
        kfree(map->grants);
        kfree(map->map_ops);
        kfree(map->unmap_ops);
        kfree(map->kmap_ops);
        kfree(map->kunmap_ops);
        kfree(map);
}

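/*
 * Allocate a grant_map for @count grants together with the operation
 * arrays and backing pages that go with it.  Every grant handle starts
 * out as -1 (invalid).  Returns NULL if any allocation fails.
 */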
static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
        struct grant_map *add;
        int i;

        add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
        if (NULL == add)
                return NULL;

        add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
        add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
        add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
        add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
        add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
        add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
            NULL == add->kmap_ops  ||
            NULL == add->kunmap_ops ||
            NULL == add->pages)
                goto err;

        if (gnttab_alloc_pages(count, add->pages))
                goto err;

        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
                add->kmap_ops[i].handle = -1;
                add->kunmap_ops[i].handle = -1;
        }

        add->index = 0;
        add->count = count;
        refcount_set(&add->users, 1);

        return add;

err:
        gntdev_free_map(add);
        return NULL;
}

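/*
 * Insert @add into priv->maps, which is kept sorted by index.  The new
 * map is assigned the first index range large enough to hold it, so
 * file offsets freed by earlier unmaps can be reused.
 */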
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (add->index + add->count < map->index) {
                        list_add_tail(&add->next, &map->next);
                        goto done;
                }
                add->index = map->index + map->count;
        }
        list_add_tail(&add->next, &priv->maps);

done:
        gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
                int index, int count)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (map->index != index)
                        continue;
                if (count && map->count != count)
                        continue;
                return map;
        }
        return NULL;
}

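/*
 * Drop one reference to @map.  When the last reference goes away this
 * uncharges the pages from the module limit, sends the unmap
 * notification (if one was requested), removes the map from
 * priv->freeable_maps where applicable, unmaps the grants in the
 * !use_ptemod case and frees everything.
 */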
static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
        if (!map)
                return;

        if (!refcount_dec_and_test(&map->users))
                return;

        atomic_sub(map->count, &pages_mapped);

        if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
                evtchn_put(map->notify.event);
        }

        if (populate_freeable_maps && priv) {
                mutex_lock(&priv->lock);
                list_del(&map->next);
                mutex_unlock(&priv->lock);
        }

        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

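/*
 * apply_to_page_range() callback: for each user PTE covering the VMA,
 * record the PTE's machine address in the corresponding map_op
 * (GNTMAP_contains_pte), so the hypervisor writes the granted frame
 * directly into the page table entry.
 */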
static int find_grant_ptes(pte_t *pte, pgtable_t token,
                unsigned long addr, void *data)
{
        struct grant_map *map = data;
        unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
        int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
        u64 pte_maddr;

        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;

        /*
         * Set the PTE as special to force get_user_pages_fast() to fall
         * back to the slow path.  If this is not supported as part of
         * the grant map, it will be done afterwards.
         */
        if (xen_feature(XENFEAT_gnttab_map_avail_bits))
                flags |= (1 << _GNTMAP_guest_avail0);

        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
        gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
                            -1 /* handle */);
        return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
                                     unsigned long addr, void *data)
{
        set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
        return 0;
}
#endif

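/*
 * Issue the grant-table map operations for every page of @map and
 * record the handles returned for later unmapping.  With use_ptemod
 * the kernel mappings (kmap_ops) are set up here, while the user PTEs
 * have already been prepared by find_grant_ptes().
 */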
static int map_grant_pages(struct grant_map *map)
{
        int i, err = 0;

        if (!use_ptemod) {
                /* Note: it could already be mapped */
                if (map->map_ops[0].handle != -1)
                        return 0;
                for (i = 0; i < map->count; i++) {
                        unsigned long addr = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
                                map->grants[i].ref,
                                map->grants[i].domid);
                        gnttab_set_unmap_op(&map->unmap_ops[i], addr,
                                map->flags, -1 /* handle */);
                }
        } else {
                /*
                 * Set up the map_ops corresponding to the pte entries pointing
                 * to the kernel linear addresses of the struct pages.
                 * These ptes are completely different from the user ptes dealt
                 * with by find_grant_ptes.
                 */
                for (i = 0; i < map->count; i++) {
                        unsigned long address = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        BUG_ON(PageHighMem(map->pages[i]));

                        gnttab_set_map_op(&map->kmap_ops[i], address,
                                map->flags | GNTMAP_host_map,
                                map->grants[i].ref,
                                map->grants[i].domid);
                        gnttab_set_unmap_op(&map->kunmap_ops[i], address,
                                map->flags | GNTMAP_host_map, -1);
                }
        }

        pr_debug("map %d+%d\n", map->index, map->count);
        err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
                        map->pages, map->count);
        if (err)
                return err;

        for (i = 0; i < map->count; i++) {
                if (map->map_ops[i].status) {
                        err = -EINVAL;
                        continue;
                }

                map->unmap_ops[i].handle = map->map_ops[i].handle;
                if (use_ptemod)
                        map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
        }
        return err;
}

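/*
 * Unmap @pages grants starting at @offset with one batched grant-table
 * unmap.  If the range covers the UNMAP_NOTIFY_CLEAR_BYTE address, that
 * byte is cleared first so the other end sees it before the pages go.
 */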
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int i, err = 0;
        struct gntab_unmap_queue_data unmap_data;

        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
                if (pgno >= offset && pgno < offset + pages) {
                        /* No need for kmap, pages are in lowmem */
                        uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }

        unmap_data.unmap_ops = map->unmap_ops + offset;
        unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
        unmap_data.pages = map->pages + offset;
        unmap_data.count = pages;

        err = gnttab_unmap_refs_sync(&unmap_data);
        if (err)
                return err;

        for (i = 0; i < pages; i++) {
                if (map->unmap_ops[offset+i].status)
                        err = -EINVAL;
                pr_debug("unmap handle=%d st=%d\n",
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = -1;
        }
        return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int range, err = 0;

        pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

        /* It is possible the requested range will have a "hole" where we
         * already unmapped some of the grants. Only unmap valid ranges.
         */
        while (pages && !err) {
                while (pages && map->unmap_ops[offset].handle == -1) {
                        offset++;
                        pages--;
                }
                range = 0;
                while (range < pages) {
                        /* range counts the leading run of still-valid
                         * handles; entries offset..offset+range-1 are
                         * unmapped below. */
                        if (map->unmap_ops[offset+range].handle == -1)
                                break;
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }

        return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;

        pr_debug("gntdev_vma_open %p\n", vma);
        refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;
        struct file *file = vma->vm_file;
        struct gntdev_priv *priv = file->private_data;

        pr_debug("gntdev_vma_close %p\n", vma);
        if (use_ptemod) {
                /* It is possible that an mmu notifier could be running
                 * concurrently, so take priv->lock to ensure that the vma won't
                 * vanish during the unmap_grant_pages call, since we will
                 * spin here until that completes. Such a concurrent call will
                 * not do any unmapping, since that has been done prior to
                 * closing the vma, but it may still iterate the unmap_ops list.
                 */
                mutex_lock(&priv->lock);
                map->vma = NULL;
                mutex_unlock(&priv->lock);
        }
        vma->vm_private_data = NULL;
        gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
                                                 unsigned long addr)
{
        struct grant_map *map = vma->vm_private_data;

        return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
        .find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */

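/*
 * Unmap the part of @map that overlaps the address range [start, end);
 * used by the mmu notifier callbacks below when the corresponding user
 * mappings are torn down.
 */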
static void unmap_if_in_range(struct grant_map *map,
                              unsigned long start, unsigned long end)
{
        unsigned long mstart, mend;
        int err;

        if (!map->vma)
                return;
        if (map->vma->vm_start >= end)
                return;
        if (map->vma->vm_end <= start)
                return;
        mstart = max(start, map->vma->vm_start);
        mend   = min(end,   map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
                        map->index, map->count,
                        map->vma->vm_start, map->vma->vm_end,
                        start, end, mstart, mend);
        err = unmap_grant_pages(map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
}

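/*
 * mmu notifier callbacks: grants must be unmapped before the pages they
 * are mapped into can be invalidated or the mm torn down, so any map
 * overlapping the affected range is unmapped here under priv->lock.
 */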
static void mn_invl_range_start(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;

        mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                unmap_if_in_range(map, start, end);
        }
        mutex_unlock(&priv->lock);
}

static void mn_release(struct mmu_notifier *mn,
                       struct mm_struct *mm)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
        int err;

        mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                                map->index, map->count,
                                map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                                map->index, map->count,
                                map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        mutex_unlock(&priv->lock);
}

static const struct mmu_notifier_ops gntdev_mmu_ops = {
        .release                = mn_release,
        .invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

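/*
 * Character device entry points.  Each open file gets its own
 * gntdev_priv; with use_ptemod an mmu notifier is registered on the
 * opener's mm so grants are unmapped before their VMAs disappear.
 */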
static int gntdev_open(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        INIT_LIST_HEAD(&priv->maps);
        INIT_LIST_HEAD(&priv->freeable_maps);
        mutex_init(&priv->lock);

        if (use_ptemod) {
                priv->mm = get_task_mm(current);
                if (!priv->mm) {
                        kfree(priv);
                        return -ENOMEM;
                }
                priv->mn.ops = &gntdev_mmu_ops;
                ret = mmu_notifier_register(&priv->mn, priv->mm);
                mmput(priv->mm);
        }

        if (ret) {
                kfree(priv);
                return ret;
        }

        flip->private_data = priv;
        pr_debug("priv %p\n", priv);

        return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv = flip->private_data;
        struct grant_map *map;

        pr_debug("priv %p\n", priv);

        mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
        mutex_unlock(&priv->lock);

        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
        kfree(priv);
        return 0;
}

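/*
 * IOCTL_GNTDEV_MAP_GRANT_REF: allocate a grant_map for the requested
 * grant refs, charge it against the module's page limit, and return
 * the file offset userspace should pass to mmap().
 */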
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                                       struct ioctl_gntdev_map_grant_ref __user *u)
{
        struct ioctl_gntdev_map_grant_ref op;
        struct grant_map *map;
        int err;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, add %d\n", priv, op.count);
        if (unlikely(op.count <= 0))
                return -EINVAL;

        err = -ENOMEM;
        map = gntdev_alloc_map(priv, op.count);
        if (!map)
                return err;

        if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
                pr_debug("can't map: over limit\n");
                gntdev_put_map(NULL, map);
                return err;
        }

        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
                gntdev_put_map(NULL, map);
                return -EFAULT;
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
        mutex_unlock(&priv->lock);

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;

        return 0;
}

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                                         struct ioctl_gntdev_unmap_grant_ref __user *u)
{
        struct ioctl_gntdev_unmap_grant_ref op;
        struct grant_map *map;
        int err = -ENOENT;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
                if (populate_freeable_maps)
                        list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
        mutex_unlock(&priv->lock);
        if (map)
                gntdev_put_map(priv, map);
        return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
                                              struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct grant_map *map;
        int rv = -EINVAL;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
                goto out_unlock;

        map = vma->vm_private_data;
        if (!map)
                goto out_unlock;

        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
        rv = 0;

 out_unlock:
        up_read(&current->mm->mmap_sem);

        if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
        return rv;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_unmap_notify op;
        struct grant_map *map;
        int rc;
        int out_flags;
        unsigned int out_event;

        if (copy_from_user(&op, u, sizeof(op)))
                return -EFAULT;

        if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
                return -EINVAL;

        /* We need to grab a reference to the event channel we are going to use
         * to send the notify before releasing the reference we may already have
         * (if someone has called this ioctl twice). This is required so that
         * it is possible to change the clear_byte part of the notification
         * without disturbing the event channel part, which may now be the last
         * reference to that event channel.
         */
        if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
                if (evtchn_get(op.event_channel_port))
                        return -EINVAL;
        }

        out_flags = op.action;
        out_event = op.event_channel_port;

        mutex_lock(&priv->lock);

        list_for_each_entry(map, &priv->maps, next) {
                uint64_t begin = map->index << PAGE_SHIFT;
                uint64_t end = (map->index + map->count) << PAGE_SHIFT;
                if (op.index >= begin && op.index < end)
                        goto found;
        }
        rc = -ENOENT;
        goto unlock_out;

 found:
        if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
                        (map->flags & GNTMAP_readonly)) {
                rc = -EINVAL;
                goto unlock_out;
        }

        out_flags = map->notify.flags;
        out_event = map->notify.event;

        map->notify.flags = op.action;
        map->notify.addr = op.index - (map->index << PAGE_SHIFT);
        map->notify.event = op.event_channel_port;

        rc = 0;

 unlock_out:
        mutex_unlock(&priv->lock);

        /* Drop the reference to the event channel we did not save in the map */
        if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
                evtchn_put(out_event);

        return rc;
}

#define GNTDEV_COPY_BATCH 16

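/*
 * State for IOCTL_GNTDEV_GRANT_COPY: up to GNTDEV_COPY_BATCH copy ops
 * are accumulated (along with the local pages they pin and the user
 * status slots they report to) and then flushed with a single
 * gnttab_batch_copy() to amortize the hypercall cost.
 */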
struct gntdev_copy_batch {
        struct gnttab_copy ops[GNTDEV_COPY_BATCH];
        struct page *pages[GNTDEV_COPY_BATCH];
        s16 __user *status[GNTDEV_COPY_BATCH];
        unsigned int nr_ops;
        unsigned int nr_pages;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
                           bool writeable, unsigned long *gfn)
{
        unsigned long addr = (unsigned long)virt;
        struct page *page;
        unsigned long xen_pfn;
        int ret;

        ret = get_user_pages_fast(addr, 1, writeable, &page);
        if (ret < 0)
                return ret;

        batch->pages[batch->nr_pages++] = page;

        xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
        *gfn = pfn_to_gfn(xen_pfn);

        return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        for (i = 0; i < batch->nr_pages; i++)
                put_page(batch->pages[i]);
        batch->nr_pages = 0;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        gnttab_batch_copy(batch->ops, batch->nr_ops);
        gntdev_put_pages(batch);

        /*
         * For each completed op, update the status if the op failed
         * and all previous ops for the segment were successful.
         */
        for (i = 0; i < batch->nr_ops; i++) {
                s16 status = batch->ops[i].status;
                s16 old_status;

                if (status == GNTST_okay)
                        continue;

                if (__get_user(old_status, batch->status[i]))
                        return -EFAULT;

                if (old_status != GNTST_okay)
                        continue;

                if (__put_user(status, batch->status[i]))
                        return -EFAULT;
        }

        batch->nr_ops = 0;
        return 0;
}

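/*
 * Split one user-supplied copy segment into per-page gnttab_copy ops,
 * flushing the batch whenever it fills up.  Local addresses may span
 * several Xen pages; grant-ref sides must fit within a single page.
 */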
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
                                 struct gntdev_grant_copy_segment *seg,
                                 s16 __user *status)
{
        uint16_t copied = 0;

        /*
         * Disallow local -> local copies since there is only space in
         * batch->pages for one page per-op and this would be a very
         * expensive memcpy().
         */
        if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
                return -EINVAL;

        /* Can't cross page if source/dest is a grant ref. */
        if (seg->flags & GNTCOPY_source_gref) {
                if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }
        if (seg->flags & GNTCOPY_dest_gref) {
                if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }

        if (put_user(GNTST_okay, status))
                return -EFAULT;

        while (copied < seg->len) {
                struct gnttab_copy *op;
                void __user *virt;
                size_t len, off;
                unsigned long gfn;
                int ret;

                if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
                        ret = gntdev_copy(batch);
                        if (ret < 0)
                                return ret;
                }

                len = seg->len - copied;

                op = &batch->ops[batch->nr_ops];
                op->flags = 0;

                if (seg->flags & GNTCOPY_source_gref) {
                        op->source.u.ref = seg->source.foreign.ref;
                        op->source.domid = seg->source.foreign.domid;
                        op->source.offset = seg->source.foreign.offset + copied;
                        op->flags |= GNTCOPY_source_gref;
                } else {
                        virt = seg->source.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, false, &gfn);
                        if (ret < 0)
                                return ret;

                        op->source.u.gmfn = gfn;
                        op->source.domid = DOMID_SELF;
                        op->source.offset = off;
                }

                if (seg->flags & GNTCOPY_dest_gref) {
                        op->dest.u.ref = seg->dest.foreign.ref;
                        op->dest.domid = seg->dest.foreign.domid;
                        op->dest.offset = seg->dest.foreign.offset + copied;
                        op->flags |= GNTCOPY_dest_gref;
                } else {
                        virt = seg->dest.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, true, &gfn);
                        if (ret < 0)
                                return ret;

                        op->dest.u.gmfn = gfn;
                        op->dest.domid = DOMID_SELF;
                        op->dest.offset = off;
                }

                op->len = len;
                copied += len;

                batch->status[batch->nr_ops] = status;
                batch->nr_ops++;
        }

        return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_grant_copy copy;
        struct gntdev_copy_batch batch;
        unsigned int i;
        int ret = 0;

        if (copy_from_user(&copy, u, sizeof(copy)))
                return -EFAULT;

        batch.nr_ops = 0;
        batch.nr_pages = 0;

        for (i = 0; i < copy.count; i++) {
                struct gntdev_grant_copy_segment seg;

                if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
                        ret = -EFAULT;
                        goto out;
                }

                ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
                if (ret < 0)
                        goto out;

                cond_resched();
        }
        if (batch.nr_ops)
                ret = gntdev_copy(&batch);
        return ret;

  out:
        gntdev_put_pages(&batch);
        return ret;
}

static long gntdev_ioctl(struct file *flip,
                         unsigned int cmd, unsigned long arg)
{
        struct gntdev_priv *priv = flip->private_data;
        void __user *ptr = (void __user *)arg;

        switch (cmd) {
        case IOCTL_GNTDEV_MAP_GRANT_REF:
                return gntdev_ioctl_map_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_UNMAP_GRANT_REF:
                return gntdev_ioctl_unmap_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
                return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

        case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
                return gntdev_ioctl_notify(priv, ptr);

        case IOCTL_GNTDEV_GRANT_COPY:
                return gntdev_ioctl_grant_copy(priv, ptr);

        default:
                pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
                return -ENOIOCTLCMD;
        }

        return 0;
}

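/*
 * mmap() path: look up the grant_map for the given offset, pin it to
 * this VMA and perform the actual grant mapping, either by rewriting
 * the user PTEs (use_ptemod) or by inserting the already-mapped pages.
 */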
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
        struct gntdev_priv *priv = flip->private_data;
        int index = vma->vm_pgoff;
        int count = vma_pages(vma);
        struct grant_map *map;
        int i, err = -EINVAL;

        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        pr_debug("map %d+%d at %lx (pgoff %lx)\n",
                        index, count, vma->vm_start, vma->vm_pgoff);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
        if (use_ptemod && map->vma)
                goto unlock_out;
        if (use_ptemod && priv->mm != vma->vm_mm) {
                pr_warn("Huh? Other mm?\n");
                goto unlock_out;
        }

        refcount_inc(&map->users);

        vma->vm_ops = &gntdev_vmops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;

        vma->vm_private_data = map;

        if (use_ptemod)
                map->vma = vma;

        if (map->flags) {
                if ((vma->vm_flags & VM_WRITE) &&
                                (map->flags & GNTMAP_readonly))
                        goto out_unlock_put;
        } else {
                map->flags = GNTMAP_host_map;
                if (!(vma->vm_flags & VM_WRITE))
                        map->flags |= GNTMAP_readonly;
        }

        mutex_unlock(&priv->lock);

        if (use_ptemod) {
                map->pages_vm_start = vma->vm_start;
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
                                          find_grant_ptes, map);
                if (err) {
                        pr_warn("find_grant_ptes() failure.\n");
                        goto out_put_map;
                }
        }

        err = map_grant_pages(map);
        if (err)
                goto out_put_map;

        if (!use_ptemod) {
                for (i = 0; i < count; i++) {
                        err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
                                map->pages[i]);
                        if (err)
                                goto out_put_map;
                }
        } else {
#ifdef CONFIG_X86
                /*
                 * If the PTEs were not made special by the grant map
                 * hypercall, do so here.
                 *
                 * This is racy since the mapping is already visible
                 * to userspace but userspace should be well-behaved
                 * enough to not touch it until the mmap() call
                 * returns.
                 */
                if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
                        apply_to_page_range(vma->vm_mm, vma->vm_start,
                                            vma->vm_end - vma->vm_start,
                                            set_grant_ptes_as_special, NULL);
                }
#endif
        }

        return 0;

unlock_out:
        mutex_unlock(&priv->lock);
        return err;

out_unlock_put:
        mutex_unlock(&priv->lock);
out_put_map:
        if (use_ptemod)
                map->vma = NULL;
        gntdev_put_map(priv, map);
        return err;
}

static const struct file_operations gntdev_fops = {
        .owner = THIS_MODULE,
        .open = gntdev_open,
        .release = gntdev_release,
        .mmap = gntdev_mmap,
        .unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
        .minor        = MISC_DYNAMIC_MINOR,
        .name         = "xen/gntdev",
        .fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

        err = misc_register(&gntdev_miscdev);
        if (err != 0) {
                pr_err("Could not register gntdev device\n");
                return err;
        }
        return 0;
}

static void __exit gntdev_exit(void)
{
        misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */