/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

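/*
 * Pass a generic hypercall from userspace straight through to the
 * hypervisor; the operation number and all five argument slots are
 * forwarded unmodified, and the hypercall's return value is handed back
 * as the ioctl result.
 */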
static long privcmd_ioctl_hypercall(void __user *udata)
{
        struct privcmd_hypercall hypercall;
        long ret;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);

        return ret;
}

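/*
 * A minimal userspace sketch of driving this ioctl, assuming the usual
 * /dev/xen/privcmd node and the uapi definitions from <xen/privcmd.h>
 * (illustrative only, not part of this file; error handling elided;
 * arg[0] is XENVER_version, i.e. 0):
 *
 *      struct privcmd_hypercall call = {
 *              .op  = __HYPERVISOR_xen_version,
 *              .arg = { 0, 0, 0, 0, 0 },
 *      };
 *      int fd = open("/dev/xen/privcmd", O_RDWR);
 *      long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */
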
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 * Elements never straddle a page boundary: a fresh page is started
 * whenever fewer than @size bytes remain in the current one.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;
                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}

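/*
 * gather_array() and traverse_pages() are designed to pair up.  A
 * kernel-side sketch of walking a user array of xen_pfn_t, with a
 * hypothetical callback name and error handling elided:
 *
 *      LIST_HEAD(pagelist);
 *      ret = gather_array(&pagelist, nr, sizeof(xen_pfn_t), user_ptr);
 *      if (!ret && !list_empty(&pagelist))
 *              ret = traverse_pages(nr, sizeof(xen_pfn_t), &pagelist,
 *                                   inspect_one_pfn, &my_state);
 *      free_page_list(&pagelist);
 */
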
struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_mfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_mfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

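/*
 * IOCTL_PRIVCMD_MMAP: map a caller-supplied list of machine-frame
 * ranges from @mmapcmd.dom into a single, previously mmap()ed VMA of
 * the calling process, starting at the VMA's base address.
 */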
static long privcmd_ioctl_mmap(void __user *udata)
{
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        down_write(&mm->mmap_sem);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = find_vma(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) ||
                    !privcmd_enforce_singleshot_mapping(vma))
                        goto out_up;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_mfn_range, &state);

out_up:
        up_write(&mm->mmap_sem);

out:
        free_page_list(&pagelist);

        return rc;
}

struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* Auto-translated dom0 note: if the domU being created is PV, then the
 * mfn is a real machine frame number (an address on the bus).  If the
 * domain is auto-translated, then the mfn is actually a pfn (the input
 * to the HAP tables).
 */
static int mmap_batch_fn(void *data, void *state)
{
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page *cur_page = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_page = pages[st->index++];

        ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
                                         st->vma->vm_page_prot, st->domain,
                                         &cur_page);

        /* Store error code for second pass. */
        if (st->version == 1) {
                if (ret < 0) {
                        /*
                         * V1 encodes the error codes in the top nibble of the
                         * 32 bit mfn (with its known limitations vis-a-vis
                         * 64 bit callers).
                         */
                        *mfnp |= (ret == -ENOENT) ?
                                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                }
        } else { /* st->version == 2 */
                *((int *) mfnp) = ret;
        }

        /* And see if it affects the global_error. */
        if (ret < 0) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += PAGE_SIZE;

        return 0;
}

static int mmap_return_errors(void *data, void *state)
{
        struct mmap_batch_state *st = state;

        if (st->version == 1) {
                xen_pfn_t mfnp = *((xen_pfn_t *) data);
                if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
                        return __put_user(mfnp, st->user_mfn++);
                else
                        st->user_mfn++;
        } else { /* st->version == 2 */
                int err = *((int *) data);
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}

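/*
 * Worked example of the V1 encoding above, assuming the uapi values
 * PRIVCMD_MMAPBATCH_PAGED_ERROR = 0x80000000 and
 * PRIVCMD_MMAPBATCH_MFN_ERROR = 0xf0000000 from <xen/privcmd.h>: a
 * paged-out frame 0x1234 comes back to userspace as 0x80001234, and
 * any other failure as 0xf0001234; masking off the top nibble recovers
 * the original mfn.  Both forms match the MFN_ERROR test above.
 */
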
/* Allocate pfns that are then mapped with gmfns from the foreign domid.
 * Update the vma with the page info, to be used later.
 * Returns: 0 on success, otherwise -errno.
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(numpgs, pages, 0);
        if (rc != 0) {
                pr_warn("%s: could not allocate %d pfns, rc = %d\n", __func__,
                        numpgs, rc);
                kfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
        vma->vm_private_data = pages;

        return 0;
}

static struct vm_operations_struct privcmd_vm_ops;

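/*
 * IOCTL_PRIVCMD_MMAPBATCH{,_V2}: map an array of guest frames from
 * @m.dom at @m.addr, one page per frame.  V1 reports per-frame errors
 * by poisoning the caller's mfn array in place; V2 writes errno values
 * to the separate @m.err array.
 */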
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        down_write(&mm->mmap_sem);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
                ret = -EINVAL;
                goto out;
        }
        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                ret = alloc_empty_pages(vma, m.num);
                if (ret < 0) {
                        up_write(&mm->mmap_sem);
                        goto out;
                }
        }

        state.domain        = m.dom;
        state.vma           = vma;
        state.va            = m.addr;
        state.index         = 0;
        state.global_error  = 0;
        state.version       = version;

        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
                              &pagelist, mmap_batch_fn, &state));

        up_write(&mm->mmap_sem);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_mfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
                                     &pagelist, mmap_return_errors, &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);

        return ret;
}

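/*
 * A minimal userspace sketch of the V2 batch path, assuming the uapi
 * definitions from <xen/privcmd.h> (illustrative only; fd is an open
 * /dev/xen/privcmd descriptor, gfns[] holds NR frame numbers belonging
 * to domid, and error handling is elided).  Note the mapping must cover
 * the whole VMA, exactly as checked above:
 *
 *      void *addr = mmap(NULL, NR * getpagesize(), PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 *      int err[NR];
 *      struct privcmd_mmapbatch_v2 batch = {
 *              .num  = NR,
 *              .dom  = domid,
 *              .addr = (unsigned long)addr,
 *              .arr  = gfns,
 *              .err  = err,
 *      };
 *      int rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);
 */
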
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOSYS;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

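/*
 * For auto-translated domains, hand the ballooned pages backing a
 * mapping back when the VMA goes away; PV mappings need no cleanup
 * here.
 */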
static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        xen_unmap_domain_mfn_range(vma, numpgs, pages);
        free_xenballooned_pages(numpgs, pages);
        kfree(pages);
}

static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vma, vma->vm_start, vma->vm_end,
               vmf->pgoff, vmf->virtual_address);

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
                         VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}

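/*
 * Atomically claim the VMA for a single mapping operation: the cmpxchg
 * on vm_private_data succeeds only the first time (NULL ->
 * PRIV_VMA_LOCKED), so any second attempt to map into the same VMA is
 * refused.
 */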
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
        return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
        return 0;
}

static void __exit privcmd_exit(void)
{
        misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);