linux/drivers/gpu/drm/i915/i915_gem_userptr.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

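/*
 * Bookkeeping for a client mm that owns userptr objects: one i915_mm_struct
 * is created per mm_struct, looked up by pointer in dev_priv->mm_structs and
 * shared, via the kref, by every userptr object of that client.  It in turn
 * holds the optional i915_mmu_notifier, and its release is deferred to a
 * worker; see the comment in i915_gem_userptr_init__mm_struct() below.
 */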
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root_cached objects;
        struct workqueue_struct *wq;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct work_struct *active;

        /* Cancel any active worker and force us to re-evaluate gup */
        mutex_lock(&obj->mm.lock);
        active = fetch_and_zero(&obj->userptr.work);
        mutex_unlock(&obj->mm.lock);
        if (active)
                goto out;

        i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

        mutex_lock(&obj->base.dev->struct_mutex);

        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        WARN_ONCE(i915_gem_object_has_pages(obj),
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
                  obj->bind_count,
                  atomic_read(&obj->mm.pages_pin_count),
                  obj->pin_global);

        mutex_unlock(&obj->base.dev->struct_mutex);

out:
        i915_gem_object_put(obj);
}

static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects.rb_root))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;
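        /*
         * e.g. an invalidation of [addr, addr + PAGE_SIZE) is looked up
         * below as the inclusive interval [addr, addr + PAGE_SIZE - 1].
         */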

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);

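        /*
         * Wait for the queued cancel_userptr() workers before returning to
         * the core mm: every object overlapping the range is then either
         * unbound with its pages released, or has had its pending page
         * acquisition marked as superseded, so the GPU is no longer using
         * the addresses being invalidated.
         */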
        if (!list_empty(&cancelled))
                flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT_CACHED;
        mn->wq = alloc_workqueue("i915-userptr-release",
                                 WQ_UNBOUND | WQ_MEM_RECLAIM,
                                 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int err = 0;

        mn = mm->mn;
        if (mn)
                return mn;

        mn = i915_mmu_notifier_create(mm->mm);
        if (IS_ERR(mn))
                err = PTR_ERR(mn);

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if (mm->mn == NULL && !err) {
                /* Protected by mmap_sem (write-lock) */
                err = __mmu_notifier_register(&mn->mn, mm->mm);
                if (!err) {
                        /* Protected by mm_lock */
                        mm->mn = fetch_and_zero(&mn);
                }
        } else if (mm->mn) {
                /*
                 * Someone else raced and successfully installed the mmu
                 * notifier, we can cancel our own errors.
                 */
                err = 0;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        if (mn && !IS_ERR(mn)) {
                destroy_workqueue(mn->wq);
                kfree(mn);
        }

        return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                mmgrab(current->mm);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

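/*
 * Invoked via kref_put_mutex() with dev_priv->mm_lock already held; the
 * release callback inherits that lock, which is what serialises hash_del()
 * here, and it must drop the mutex itself before deferring the rest of the
 * teardown to the worker above.
 */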
static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
                               struct page **pvec, int num_pages)
{
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
        unsigned int sg_page_sizes;
        int ret;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return ERR_PTR(-ENOMEM);

alloc_table:
        ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
                                          0, num_pages << PAGE_SHIFT,
                                          max_segment,
                                          GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ERR_PTR(ret);
        }

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                sg_free_table(st);

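                /*
                 * DMA mapping of large coalesced segments can fail (e.g. if
                 * they have to be bounced through swiotlb), so retry once
                 * with single-page segments before giving up.
                 */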
                if (max_segment > PAGE_SIZE) {
                        max_segment = PAGE_SIZE;
                        goto alloc_table;
                }

                kfree(st);
                return ERR_PTR(ret);
        }

        sg_page_sizes = i915_sg_page_sizes(st->sgl);

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return st;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held.  To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;

                if (!obj->userptr.read_only)
                        flags |= FOLL_WRITE;

                ret = -EFAULT;
                if (mmget_not_zero(mm)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
                                         pvec + pinned, NULL, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&obj->mm.lock);
        if (obj->userptr.work == &work->work) {
                struct sg_table *pages = ERR_PTR(ret);

                if (pinned == npages) {
                        pages = __i915_gem_userptr_alloc_pages(obj, pvec,
                                                               npages);
                        if (!IS_ERR(pages)) {
                                pinned = 0;
                                pages = NULL;
                        }
                }

                obj->userptr.work = ERR_CAST(pages);
                if (IS_ERR(pages))
                        __i915_gem_userptr_set_active(obj, false);
        }
        mutex_unlock(&obj->mm.lock);

        release_pages(pvec, pinned);
        kvfree(pvec);

        i915_gem_object_put(obj);
        put_task_struct(work->task);
        kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return ERR_PTR(-ENOMEM);

        obj->userptr.work = &work->work;

        work->obj = i915_gem_object_get(obj);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

        return ERR_PTR(-EAGAIN);
}

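/*
 * obj->userptr.work doubles as a small state machine for the asynchronous
 * acquisition above: NULL means no acquisition is in flight (the fast path,
 * or a completed worker that installed its pages), a pointer to the queued
 * work_struct means the worker is still running (callers see -EAGAIN below),
 * and an ERR_PTR records the error reported by a failed worker.  The pointer
 * is only updated under obj->mm.lock.
 */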
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
        bool active;
        int pinned;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
                        return PTR_ERR(obj->userptr.work);
                else
                        return -EAGAIN;
        }

        pvec = NULL;
        pinned = 0;

        if (mm == current->mm) {
                pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                                      GFP_KERNEL |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
                if (pvec) /* defer to worker if malloc fails */
                        pinned = __get_user_pages_fast(obj->userptr.ptr,
                                                       num_pages,
                                                       !obj->userptr.read_only,
                                                       pvec);
        }

        active = false;
        if (pinned < 0) {
                pages = ERR_PTR(pinned);
                pinned = 0;
        } else if (pinned < num_pages) {
                pages = __i915_gem_userptr_get_pages_schedule(obj);
                active = pages == ERR_PTR(-EAGAIN);
        } else {
                pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
                active = !IS_ERR(pages);
        }
        if (active)
                __i915_gem_userptr_set_active(obj, true);

        if (IS_ERR(pages))
                release_pages(pvec, pinned);
        kvfree(pvec);

        return PTR_ERR_OR_ZERO(pages);
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->mm.madv != I915_MADV_WILLNEED)
                obj->mm.dirty = false;

        i915_gem_gtt_finish_pages(obj, pages);

        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
                /* We cannot support coherent userptr objects on hw without
                 * LLC and broken snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
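
/*
 * Illustrative userspace sketch (not part of this file): the uapi names come
 * from include/uapi/drm/i915_drm.h, drmIoctl() from libdrm, and fd/size are
 * assumed to be an open DRM fd and a page-aligned length, satisfying
 * restriction 1 in the comment above i915_gem_userptr_ioctl().  On success
 * arg.handle is a GEM handle that can be used with execbuffer:
 *
 *      struct drm_i915_gem_userptr arg = { 0 };
 *      void *ptr;
 *
 *      if (posix_memalign(&ptr, 4096, size))
 *              return -1;
 *
 *      arg.user_ptr = (uintptr_t)ptr;
 *      arg.user_size = size;
 *      arg.flags = 0;
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *              return -1;
 */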

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);

        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire",
                                WQ_HIGHPRI | WQ_UNBOUND,
                                0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;

        return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->mm.userptr_wq);
}