linux/drivers/gpu/drm/i915/i915_gem_userptr.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

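/*
 * An i915_mm_struct wraps the client's mm_struct so that all userptr
 * objects created from the same process share a single mmu_notifier
 * registration. It is reference counted, looked up in the per-device
 * mm_structs hash under dev_priv->mm_lock, and released from a worker
 * so the final mmdrop()/unregister never runs under struct_mutex.
 */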
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_device *dev;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

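/*
 * One i915_mmu_notifier is registered per mm. It keeps an interval tree
 * of the userptr objects belonging to that mm, indexed by the CPU address
 * range each object covers, so an invalidation event can be matched
 * against the affected objects. The tree and each object's attached flag
 * are protected by the spinlock, not by struct_mutex.
 */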
struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

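/*
 * Worker scheduled from invalidate_range_start: under struct_mutex it
 * clears the pending get-pages work (forcing a fresh gup on next use),
 * unbinds any VMAs, releases the backing pages, and finally drops the
 * reference taken by the notifier.
 */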
static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
                struct i915_vma *vma, *tmp;
                bool was_interruptible;

                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;

                list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
                WARN_ON(i915_gem_object_put_pages(obj));

                dev_priv->mm.interruptible = was_interruptible;
        }

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
}

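/*
 * add_object/del_object must be called with mn->lock held; the attached
 * flag makes them idempotent so callers need not track whether the
 * object is currently in the interval tree.
 */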
static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        schedule_work(&mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

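/*
 * Lazily create and publish the notifier for this mm. Registration via
 * __mmu_notifier_register() requires mmap_sem held for write, and the
 * mm->mn pointer itself is serialised by dev_priv->mm_lock, giving the
 * mmap_sem -> mm_lock lock ordering used here.
 */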
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&to_i915(mm->dev)->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&to_i915(mm->dev)->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->dev = obj->base.dev;

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&to_i915(mm->dev)->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

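/*
 * State for the deferred get_user_pages() worker: the object whose pages
 * are to be pinned and the task whose address space they live in, so the
 * worker can resolve the pages on behalf of the original caller.
 */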
struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

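/*
 * Build the sg_table for the pinned pages. When swiotlb is active we keep
 * one page per scatterlist entry rather than letting
 * sg_alloc_table_from_pages() coalesce contiguous pages into larger
 * segments, presumably so each segment stays within what the bounce
 * buffer can handle.
 */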
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
{
        int ret;

        ret = st_set_pages(&obj->pages, pvec, num_pages);
        if (ret)
                return ret;

        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                sg_free_table(obj->pages);
                kfree(obj->pages);
                obj->pages = NULL;
        }

        return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held.  To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

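/*
 * Slow path: runs from the workqueue so that get_user_pages_remote() can
 * take mmap_sem without inverting the mmap_sem -> struct_mutex ordering.
 * Once the pages are pinned, struct_mutex is taken to either install them
 * on the object or report the error back through obj->userptr.work.
 */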
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kmalloc(npages*sizeof(struct page *),
                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (pvec == NULL)
                pvec = drm_malloc_ab(npages, sizeof(struct page *));
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         !obj->userptr.read_only, 0,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work == &work->work) {
                if (pinned == npages) {
                        ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
                        if (ret == 0) {
                                list_add_tail(&obj->global_list,
                                              &to_i915(dev)->mm.unbound_list);
                                obj->get_page.sg = obj->pages->sgl;
                                obj->get_page.last = 0;
                                pinned = 0;
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
                if (ret)
                        __i915_gem_userptr_set_active(obj, false);
        }

        obj->userptr.workers--;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
                                      bool *active)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
                return -EAGAIN;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        obj->userptr.work = &work->work;
        obj->userptr.workers++;

        work->obj = obj;
        drm_gem_object_reference(&obj->base);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        schedule_work(&work->work);

        *active = true;
        return -EAGAIN;
}

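/*
 * get_pages entry point: try the lockless fast path first when we are
 * called from the process that created the object (current->mm matches),
 * using __get_user_pages_fast(); anything not fully pinned falls back to
 * the worker above, with -EAGAIN telling the caller to retry.
 */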
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;
        bool active;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */
        if (IS_ERR(obj->userptr.work)) {
                /* active flag will have been dropped already by the worker */
                ret = PTR_ERR(obj->userptr.work);
                obj->userptr.work = NULL;
                return ret;
        }
        if (obj->userptr.work)
                /* active flag should still be held for the pending work */
                return -EAGAIN;

        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
                return ret;

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
                        if (pvec == NULL) {
                                __i915_gem_userptr_set_active(obj, false);
                                return -ENOMEM;
                        }
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }

        active = false;
        if (pinned < 0)
                ret = pinned, pinned = 0;
        else if (pinned < num_pages)
                ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
        else
                ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
        if (ret) {
                __i915_gem_userptr_set_active(obj, active);
                release_pages(pvec, pinned, 0);
        }
        drm_free_large(pvec);
        return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_page_iter sg_iter;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        i915_gem_gtt_finish_object(obj);

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
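
/*
 * A minimal sketch of how userspace might exercise this ioctl, assuming
 * the DRM_IOCTL_I915_GEM_USERPTR wrapper and struct layout exported by
 * the uapi header <drm/i915_drm.h> (only fields used above are shown),
 * with drm_fd an already-open device node and error handling elided:
 *
 *      void *ptr;
 *      struct drm_i915_gem_userptr arg = { 0 };
 *
 *      posix_memalign(&ptr, 4096, 4096);  // page-aligned, page-sized
 *      arg.user_ptr = (uintptr_t)ptr;
 *      arg.user_size = 4096;
 *      arg.flags = 0;
 *      ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *      // arg.handle now names a GEM object backed by ptr's pages
 */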

int
i915_gem_init_userptr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
        return 0;
}