linux/drivers/gpu/drm/i915/i915_gem_userptr.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

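/* Per-client-mm bookkeeping: a single i915_mm_struct is shared (via kref)
 * by every userptr object created against the same mm_struct. It keeps
 * the mm alive with mmgrab()/mmdrop() and owns the optional
 * i915_mmu_notifier used to revoke pages when the address space changes.
 */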
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct workqueue_struct *wq;
};

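/* Per-object tracking node: inserted into the notifier's interval tree so
 * that an invalidation of a range of the address space can find every
 * userptr object overlapping that range.
 */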
struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

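/* Worker run when the backing mapping of a userptr object is invalidated.
 * If a get_pages worker is still in flight it is simply cancelled (its
 * result will be discarded); otherwise wait for outstanding GPU activity,
 * then unbind the object and release its pages.
 */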
static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct work_struct *active;

        /* Cancel any active worker and force us to re-evaluate gup */
        mutex_lock(&obj->mm.lock);
        active = fetch_and_zero(&obj->userptr.work);
        mutex_unlock(&obj->mm.lock);
        if (active)
                goto out;

        i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

        mutex_lock(&obj->base.dev->struct_mutex);

        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        WARN_ONCE(obj->mm.pages,
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
                  obj->bind_count,
                  atomic_read(&obj->mm.pages_pin_count),
                  obj->pin_display);

        mutex_unlock(&obj->base.dev->struct_mutex);

out:
        i915_gem_object_put(obj);
}

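/* Insert/remove the tracking node in the notifier's interval tree.
 * Both are called with mo->mn->lock held.
 */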
static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

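/* mmu_notifier callback invoked before a range of the user address space
 * is invalidated. Queue cancel_userptr() for every tracked object that
 * overlaps the range, detach those objects from the interval tree, and
 * then wait for the queued cancellations to complete.
 */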
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);

        if (!list_empty(&cancelled))
                flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                destroy_workqueue(mn->wq);
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

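/* Return the mmu_notifier for this mm, creating and registering one on
 * first use. mm_lock guards mm->mn, and mmap_sem is taken for write
 * around __mmu_notifier_register().
 */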
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

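/* Look up the i915_mm_struct wrapping a given mm_struct in the per-device
 * hash table. Caller must hold dev_priv->mm_lock.
 */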
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                mmgrab(current->mm);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

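/* Deferred gup request: the worker pins the user pages on behalf of the
 * client task, where taking mmap_sem does not invert our mmap_sem ->
 * struct_mutex lock ordering.
 */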
struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

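/* Build an sg_table for the pinned pages. When swiotlb is active keep one
 * page per segment rather than letting sg_alloc_table_from_pages()
 * coalesce contiguous pages into larger segments.
 */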
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
{
        struct sg_table *pages;
        int ret;

        ret = st_set_pages(&pages, pvec, num_pages);
        if (ret)
                return ERR_PTR(ret);

        ret = i915_gem_gtt_prepare_pages(obj, pages);
        if (ret) {
                sg_free_table(pages);
                kfree(pages);
                return ERR_PTR(ret);
        }

        return pages;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held.  To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

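/* Worker that pins the user pages with get_user_pages_remote() under the
 * target mm's mmap_sem and, if the request is still wanted, installs the
 * resulting sg_table on the object. A cancelled or superseded request
 * still runs but its result is discarded.
 */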
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;

                if (!obj->userptr.read_only)
                        flags |= FOLL_WRITE;

                ret = -EFAULT;
                if (mmget_not_zero(mm)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
                                         pvec + pinned, NULL, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&obj->mm.lock);
        if (obj->userptr.work == &work->work) {
                struct sg_table *pages = ERR_PTR(ret);

                if (pinned == npages) {
                        pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
                        if (!IS_ERR(pages)) {
                                __i915_gem_object_set_pages(obj, pages);
                                pinned = 0;
                                pages = NULL;
                        }
                }

                obj->userptr.work = ERR_CAST(pages);
                if (IS_ERR(pages))
                        __i915_gem_userptr_set_active(obj, false);
        }
        mutex_unlock(&obj->mm.lock);

        release_pages(pvec, pinned, 0);
        kvfree(pvec);

        i915_gem_object_put(obj);
        put_task_struct(work->task);
        kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return ERR_PTR(-ENOMEM);

        obj->userptr.work = &work->work;

        work->obj = i915_gem_object_get(obj);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

        return ERR_PTR(-EAGAIN);
}

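/* ->get_pages() for userptr objects: attempt a fast, non-blocking gup when
 * called from the creating process's own mm; anything that cannot be
 * pinned that way is handed to the worker, with -EAGAIN telling the
 * caller to retry once the worker has run.
 */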
static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
        bool active;
        int pinned;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
                        return ERR_CAST(obj->userptr.work);
                else
                        return ERR_PTR(-EAGAIN);
        }

        pvec = NULL;
        pinned = 0;

        if (mm == current->mm) {
                pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                                      GFP_KERNEL |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
                if (pvec) /* defer to worker if malloc fails */
                        pinned = __get_user_pages_fast(obj->userptr.ptr,
                                                       num_pages,
                                                       !obj->userptr.read_only,
                                                       pvec);
        }

        active = false;
        if (pinned < 0) {
                pages = ERR_PTR(pinned);
                pinned = 0;
        } else if (pinned < num_pages) {
                pages = __i915_gem_userptr_get_pages_schedule(obj);
                active = pages == ERR_PTR(-EAGAIN);
        } else {
                pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
                active = !IS_ERR(pages);
        }
        if (active)
                __i915_gem_userptr_set_active(obj, true);

        if (IS_ERR(pages))
                release_pages(pvec, pinned, 0);
        kvfree(pvec);

        return pages;
}

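/* ->put_pages() for userptr objects: mark the pages dirty if the object
 * was written, then drop each page reference and free the sg_table.
 */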
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->mm.madv != I915_MADV_WILLNEED)
                obj->mm.dirty = false;

        i915_gem_gtt_finish_pages(obj, pages);

        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

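/* Called before exporting the object via dma-buf: make sure an
 * mmu_notifier is attached (flags == 0 forces one, even for objects
 * created with I915_USERPTR_UNSYNCHRONIZED).
 */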
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
                /* We cannot support coherent userptr objects on hw without
                 * LLC and with broken snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

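/* One-time setup for userptr support: the mm_structs hash and its lock,
 * plus the high-priority workqueue used to acquire the user pages.
 */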
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);

        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;

        return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->mm.userptr_wq);
}