linux/drivers/gpu/drm/i915/i915_gem.c
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *
  26 */
  27
  28#include "drmP.h"
  29#include "drm.h"
  30#include "i915_drm.h"
  31#include "i915_drv.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34#include <linux/slab.h>
  35#include <linux/swap.h>
  36#include <linux/pci.h>
  37
  38static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
  39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
  41static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
  42                                                          bool write);
  43static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
  44                                                                  uint64_t offset,
  45                                                                  uint64_t size);
  46static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
  47static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
  48                                                    unsigned alignment,
  49                                                    bool map_and_fenceable);
  50static void i915_gem_clear_fence_reg(struct drm_device *dev,
  51                                     struct drm_i915_fence_reg *reg);
  52static int i915_gem_phys_pwrite(struct drm_device *dev,
  53                                struct drm_i915_gem_object *obj,
  54                                struct drm_i915_gem_pwrite *args,
  55                                struct drm_file *file);
  56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
  57
  58static int i915_gem_inactive_shrink(struct shrinker *shrinker,
  59                                    int nr_to_scan,
  60                                    gfp_t gfp_mask);
  61
  62
  63/* some bookkeeping */
  64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
  65                                  size_t size)
  66{
  67        dev_priv->mm.object_count++;
  68        dev_priv->mm.object_memory += size;
  69}
  70
  71static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
  72                                     size_t size)
  73{
  74        dev_priv->mm.object_count--;
  75        dev_priv->mm.object_memory -= size;
  76}
  77
  78int
  79i915_gem_check_is_wedged(struct drm_device *dev)
  80{
  81        struct drm_i915_private *dev_priv = dev->dev_private;
  82        struct completion *x = &dev_priv->error_completion;
  83        unsigned long flags;
  84        int ret;
  85
  86        if (!atomic_read(&dev_priv->mm.wedged))
  87                return 0;
  88
  89        ret = wait_for_completion_interruptible(x);
  90        if (ret)
  91                return ret;
  92
  93        /* Success, we reset the GPU! */
  94        if (!atomic_read(&dev_priv->mm.wedged))
  95                return 0;
  96
  97        /* GPU is hung, bump the completion count to account for
  98         * the token we just consumed so that we never hit zero and
  99         * end up waiting upon a subsequent completion event that
 100         * will never happen.
 101         */
 102        spin_lock_irqsave(&x->wait.lock, flags);
 103        x->done++;
 104        spin_unlock_irqrestore(&x->wait.lock, flags);
 105        return -EIO;
 106}
 107
 108int i915_mutex_lock_interruptible(struct drm_device *dev)
 109{
 110        struct drm_i915_private *dev_priv = dev->dev_private;
 111        int ret;
 112
 113        ret = i915_gem_check_is_wedged(dev);
 114        if (ret)
 115                return ret;
 116
 117        ret = mutex_lock_interruptible(&dev->struct_mutex);
 118        if (ret)
 119                return ret;
 120
 121        if (atomic_read(&dev_priv->mm.wedged)) {
 122                mutex_unlock(&dev->struct_mutex);
 123                return -EAGAIN;
 124        }
 125
 126        WARN_ON(i915_verify_lists(dev));
 127        return 0;
 128}
 129
 130static inline bool
 131i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 132{
 133        return obj->gtt_space && !obj->active && obj->pin_count == 0;
 134}
 135
 136void i915_gem_do_init(struct drm_device *dev,
 137                      unsigned long start,
 138                      unsigned long mappable_end,
 139                      unsigned long end)
 140{
 141        drm_i915_private_t *dev_priv = dev->dev_private;
 142
 143        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 144
 145        dev_priv->mm.gtt_start = start;
 146        dev_priv->mm.gtt_mappable_end = mappable_end;
 147        dev_priv->mm.gtt_end = end;
 148        dev_priv->mm.gtt_total = end - start;
 149        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 150
 151        /* Take over this portion of the GTT */
 152        intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 153}
 154
 155int
 156i915_gem_init_ioctl(struct drm_device *dev, void *data,
 157                    struct drm_file *file)
 158{
 159        struct drm_i915_gem_init *args = data;
 160
 161        if (args->gtt_start >= args->gtt_end ||
 162            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
 163                return -EINVAL;
 164
 165        mutex_lock(&dev->struct_mutex);
 166        i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
 167        mutex_unlock(&dev->struct_mutex);
 168
 169        return 0;
 170}
 171
 172int
 173i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 174                            struct drm_file *file)
 175{
 176        struct drm_i915_private *dev_priv = dev->dev_private;
 177        struct drm_i915_gem_get_aperture *args = data;
 178        struct drm_i915_gem_object *obj;
 179        size_t pinned;
 180
 181        if (!(dev->driver->driver_features & DRIVER_GEM))
 182                return -ENODEV;
 183
 184        pinned = 0;
 185        mutex_lock(&dev->struct_mutex);
 186        list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
 187                pinned += obj->gtt_space->size;
 188        mutex_unlock(&dev->struct_mutex);
 189
 190        args->aper_size = dev_priv->mm.gtt_total;
  191        args->aper_available_size = args->aper_size - pinned;
 192
 193        return 0;
 194}
 195
 196/**
 197 * Creates a new mm object and returns a handle to it.
 198 */
 199int
 200i915_gem_create_ioctl(struct drm_device *dev, void *data,
 201                      struct drm_file *file)
 202{
 203        struct drm_i915_gem_create *args = data;
 204        struct drm_i915_gem_object *obj;
 205        int ret;
 206        u32 handle;
 207
 208        args->size = roundup(args->size, PAGE_SIZE);
 209
 210        /* Allocate the new object */
 211        obj = i915_gem_alloc_object(dev, args->size);
 212        if (obj == NULL)
 213                return -ENOMEM;
 214
 215        ret = drm_gem_handle_create(file, &obj->base, &handle);
 216        if (ret) {
 217                drm_gem_object_release(&obj->base);
 218                i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
 219                kfree(obj);
 220                return ret;
 221        }
 222
 223        /* drop reference from allocate - handle holds it now */
 224        drm_gem_object_unreference(&obj->base);
 225        trace_i915_gem_object_create(obj);
 226
 227        args->handle = handle;
 228        return 0;
 229}
 230
 231static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 232{
 233        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 234
 235        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 236                obj->tiling_mode != I915_TILING_NONE;
 237}
 238
 239static inline void
 240slow_shmem_copy(struct page *dst_page,
 241                int dst_offset,
 242                struct page *src_page,
 243                int src_offset,
 244                int length)
 245{
 246        char *dst_vaddr, *src_vaddr;
 247
 248        dst_vaddr = kmap(dst_page);
 249        src_vaddr = kmap(src_page);
 250
 251        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 252
 253        kunmap(src_page);
 254        kunmap(dst_page);
 255}
 256
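     /*
      * Note on bit 17 swizzling: in the I915_BIT_6_SWIZZLE_9_10_17 mode the
      * hardware swizzle pattern depends on physical address bit 17, which
      * userspace cannot know.  The kernel compensates here by flipping bit 6
      * of the in-page offset for any page whose physical address has bit 17
      * set (see i915_gem_object_needs_bit17_swizzle() above).
      */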
 257static inline void
 258slow_shmem_bit17_copy(struct page *gpu_page,
 259                      int gpu_offset,
 260                      struct page *cpu_page,
 261                      int cpu_offset,
 262                      int length,
 263                      int is_read)
 264{
 265        char *gpu_vaddr, *cpu_vaddr;
 266
 267        /* Use the unswizzled path if this page isn't affected. */
 268        if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
 269                if (is_read)
 270                        return slow_shmem_copy(cpu_page, cpu_offset,
 271                                               gpu_page, gpu_offset, length);
 272                else
 273                        return slow_shmem_copy(gpu_page, gpu_offset,
 274                                               cpu_page, cpu_offset, length);
 275        }
 276
 277        gpu_vaddr = kmap(gpu_page);
 278        cpu_vaddr = kmap(cpu_page);
 279
 280        /* Copy the data, XORing A6 with A17 (1). The user already knows he's
 281         * XORing with the other bits (A9 for Y, A9 and A10 for X)
 282         */
 283        while (length > 0) {
 284                int cacheline_end = ALIGN(gpu_offset + 1, 64);
 285                int this_length = min(cacheline_end - gpu_offset, length);
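                     /* XOR with 64 flips bit 6, swapping adjacent 64-byte cachelines. */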
 286                int swizzled_gpu_offset = gpu_offset ^ 64;
 287
 288                if (is_read) {
 289                        memcpy(cpu_vaddr + cpu_offset,
 290                               gpu_vaddr + swizzled_gpu_offset,
 291                               this_length);
 292                } else {
 293                        memcpy(gpu_vaddr + swizzled_gpu_offset,
 294                               cpu_vaddr + cpu_offset,
 295                               this_length);
 296                }
 297                cpu_offset += this_length;
 298                gpu_offset += this_length;
 299                length -= this_length;
 300        }
 301
 302        kunmap(cpu_page);
 303        kunmap(gpu_page);
 304}
 305
 306/**
  307 * This is the fast shmem pread path, which attempts to copy_to_user directly
  308 * from the backing pages of the object into the user's address space.  On a
  309 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 310 */
 311static int
 312i915_gem_shmem_pread_fast(struct drm_device *dev,
 313                          struct drm_i915_gem_object *obj,
 314                          struct drm_i915_gem_pread *args,
 315                          struct drm_file *file)
 316{
 317        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 318        ssize_t remain;
 319        loff_t offset;
 320        char __user *user_data;
 321        int page_offset, page_length;
 322
 323        user_data = (char __user *) (uintptr_t) args->data_ptr;
 324        remain = args->size;
 325
 326        offset = args->offset;
 327
 328        while (remain > 0) {
 329                struct page *page;
 330                char *vaddr;
 331                int ret;
 332
 333                /* Operation in this page
 334                 *
 335                 * page_offset = offset within page
 336                 * page_length = bytes to copy for this page
 337                 */
 338                page_offset = offset & (PAGE_SIZE-1);
 339                page_length = remain;
 340                if ((page_offset + remain) > PAGE_SIZE)
 341                        page_length = PAGE_SIZE - page_offset;
 342
 343                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
 344                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
 345                if (IS_ERR(page))
 346                        return PTR_ERR(page);
 347
 348                vaddr = kmap_atomic(page);
 349                ret = __copy_to_user_inatomic(user_data,
 350                                              vaddr + page_offset,
 351                                              page_length);
 352                kunmap_atomic(vaddr);
 353
 354                mark_page_accessed(page);
 355                page_cache_release(page);
 356                if (ret)
 357                        return -EFAULT;
 358
 359                remain -= page_length;
 360                user_data += page_length;
 361                offset += page_length;
 362        }
 363
 364        return 0;
 365}
 366
 367/**
  368 * This is the fallback shmem pread path, which pins the user pages with
  369 * get_user_pages() outside of the struct_mutex, so that we can copy out of
  370 * the object's backing pages via kmap while holding the struct mutex and
  371 * not take page faults.
 372 */
 373static int
 374i915_gem_shmem_pread_slow(struct drm_device *dev,
 375                          struct drm_i915_gem_object *obj,
 376                          struct drm_i915_gem_pread *args,
 377                          struct drm_file *file)
 378{
 379        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 380        struct mm_struct *mm = current->mm;
 381        struct page **user_pages;
 382        ssize_t remain;
 383        loff_t offset, pinned_pages, i;
 384        loff_t first_data_page, last_data_page, num_pages;
 385        int shmem_page_offset;
 386        int data_page_index, data_page_offset;
 387        int page_length;
 388        int ret;
 389        uint64_t data_ptr = args->data_ptr;
 390        int do_bit17_swizzling;
 391
 392        remain = args->size;
 393
 394        /* Pin the user pages containing the data.  We can't fault while
 395         * holding the struct mutex, yet we want to hold it while
 396         * dereferencing the user data.
 397         */
 398        first_data_page = data_ptr / PAGE_SIZE;
 399        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 400        num_pages = last_data_page - first_data_page + 1;
 401
 402        user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 403        if (user_pages == NULL)
 404                return -ENOMEM;
 405
 406        mutex_unlock(&dev->struct_mutex);
 407        down_read(&mm->mmap_sem);
 408        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 409                                      num_pages, 1, 0, user_pages, NULL);
 410        up_read(&mm->mmap_sem);
 411        mutex_lock(&dev->struct_mutex);
 412        if (pinned_pages < num_pages) {
 413                ret = -EFAULT;
 414                goto out;
 415        }
 416
 417        ret = i915_gem_object_set_cpu_read_domain_range(obj,
 418                                                        args->offset,
 419                                                        args->size);
 420        if (ret)
 421                goto out;
 422
 423        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 424
 425        offset = args->offset;
 426
 427        while (remain > 0) {
 428                struct page *page;
 429
 430                /* Operation in this page
 431                 *
 432                 * shmem_page_offset = offset within page in shmem file
 433                 * data_page_index = page number in get_user_pages return
  434                 * data_page_offset = offset within the data_page_index page.
 435                 * page_length = bytes to copy for this page
 436                 */
 437                shmem_page_offset = offset & ~PAGE_MASK;
 438                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 439                data_page_offset = data_ptr & ~PAGE_MASK;
 440
 441                page_length = remain;
 442                if ((shmem_page_offset + page_length) > PAGE_SIZE)
 443                        page_length = PAGE_SIZE - shmem_page_offset;
 444                if ((data_page_offset + page_length) > PAGE_SIZE)
 445                        page_length = PAGE_SIZE - data_page_offset;
 446
 447                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
 448                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
  449                if (IS_ERR(page)) {
  450                        ret = PTR_ERR(page);
                             goto out;
                     }
 451
 452                if (do_bit17_swizzling) {
 453                        slow_shmem_bit17_copy(page,
 454                                              shmem_page_offset,
 455                                              user_pages[data_page_index],
 456                                              data_page_offset,
 457                                              page_length,
 458                                              1);
 459                } else {
 460                        slow_shmem_copy(user_pages[data_page_index],
 461                                        data_page_offset,
 462                                        page,
 463                                        shmem_page_offset,
 464                                        page_length);
 465                }
 466
 467                mark_page_accessed(page);
 468                page_cache_release(page);
 469
 470                remain -= page_length;
 471                data_ptr += page_length;
 472                offset += page_length;
 473        }
 474
 475out:
 476        for (i = 0; i < pinned_pages; i++) {
 477                SetPageDirty(user_pages[i]);
 478                mark_page_accessed(user_pages[i]);
 479                page_cache_release(user_pages[i]);
 480        }
 481        drm_free_large(user_pages);
 482
 483        return ret;
 484}
 485
 486/**
 487 * Reads data from the object referenced by handle.
 488 *
 489 * On error, the contents of *data are undefined.
 490 */
 491int
 492i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 493                     struct drm_file *file)
 494{
 495        struct drm_i915_gem_pread *args = data;
 496        struct drm_i915_gem_object *obj;
 497        int ret = 0;
 498
 499        if (args->size == 0)
 500                return 0;
 501
 502        if (!access_ok(VERIFY_WRITE,
 503                       (char __user *)(uintptr_t)args->data_ptr,
 504                       args->size))
 505                return -EFAULT;
 506
 507        ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
 508                                       args->size);
 509        if (ret)
 510                return -EFAULT;
 511
 512        ret = i915_mutex_lock_interruptible(dev);
 513        if (ret)
 514                return ret;
 515
 516        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 517        if (obj == NULL) {
 518                ret = -ENOENT;
 519                goto unlock;
 520        }
 521
 522        /* Bounds check source.  */
 523        if (args->offset > obj->base.size ||
 524            args->size > obj->base.size - args->offset) {
 525                ret = -EINVAL;
 526                goto out;
 527        }
 528
 529        ret = i915_gem_object_set_cpu_read_domain_range(obj,
 530                                                        args->offset,
 531                                                        args->size);
 532        if (ret)
 533                goto out;
 534
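             /* Presetting -EFAULT makes objects that need bit-17 swizzling skip
              * the fast path entirely; the fast path itself also returns -EFAULT
              * when it would have to fault in user pages.  Either way we fall
              * back to the slow path, which pins the user pages first.
              */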
 535        ret = -EFAULT;
 536        if (!i915_gem_object_needs_bit17_swizzle(obj))
 537                ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
 538        if (ret == -EFAULT)
 539                ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 540
 541out:
 542        drm_gem_object_unreference(&obj->base);
 543unlock:
 544        mutex_unlock(&dev->struct_mutex);
 545        return ret;
 546}
 547
 548/* This is the fast write path which cannot handle
 549 * page faults in the source data
 550 */
 551
 552static inline int
 553fast_user_write(struct io_mapping *mapping,
 554                loff_t page_base, int page_offset,
 555                char __user *user_data,
 556                int length)
 557{
 558        char *vaddr_atomic;
 559        unsigned long unwritten;
 560
 561        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 562        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
 563                                                      user_data, length);
 564        io_mapping_unmap_atomic(vaddr_atomic);
 565        return unwritten;
 566}
 567
 568/* Here's the write path which can sleep for
 569 * page faults
 570 */
 571
 572static inline void
 573slow_kernel_write(struct io_mapping *mapping,
 574                  loff_t gtt_base, int gtt_offset,
 575                  struct page *user_page, int user_offset,
 576                  int length)
 577{
 578        char __iomem *dst_vaddr;
 579        char *src_vaddr;
 580
 581        dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
 582        src_vaddr = kmap(user_page);
 583
 584        memcpy_toio(dst_vaddr + gtt_offset,
 585                    src_vaddr + user_offset,
 586                    length);
 587
 588        kunmap(user_page);
 589        io_mapping_unmap(dst_vaddr);
 590}
 591
 592/**
 593 * This is the fast pwrite path, where we copy the data directly from the
 594 * user into the GTT, uncached.
 595 */
 596static int
 597i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 598                         struct drm_i915_gem_object *obj,
 599                         struct drm_i915_gem_pwrite *args,
 600                         struct drm_file *file)
 601{
 602        drm_i915_private_t *dev_priv = dev->dev_private;
 603        ssize_t remain;
 604        loff_t offset, page_base;
 605        char __user *user_data;
 606        int page_offset, page_length;
 607
 608        user_data = (char __user *) (uintptr_t) args->data_ptr;
 609        remain = args->size;
 610
 611        offset = obj->gtt_offset + args->offset;
 612
 613        while (remain > 0) {
 614                /* Operation in this page
 615                 *
 616                 * page_base = page offset within aperture
 617                 * page_offset = offset within page
 618                 * page_length = bytes to copy for this page
 619                 */
 620                page_base = (offset & ~(PAGE_SIZE-1));
 621                page_offset = offset & (PAGE_SIZE-1);
 622                page_length = remain;
 623                if ((page_offset + remain) > PAGE_SIZE)
 624                        page_length = PAGE_SIZE - page_offset;
 625
 626                /* If we get a fault while copying data, then (presumably) our
 627                 * source page isn't available.  Return the error and we'll
 628                 * retry in the slow path.
 629                 */
 630                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
 631                                    page_offset, user_data, page_length))
 632
 633                        return -EFAULT;
 634
 635                remain -= page_length;
 636                user_data += page_length;
 637                offset += page_length;
 638        }
 639
 640        return 0;
 641}
 642
 643/**
 644 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
  645 * the memory and maps it using kmap for copying.
 646 *
 647 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 648 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 649 */
 650static int
 651i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 652                         struct drm_i915_gem_object *obj,
 653                         struct drm_i915_gem_pwrite *args,
 654                         struct drm_file *file)
 655{
 656        drm_i915_private_t *dev_priv = dev->dev_private;
 657        ssize_t remain;
 658        loff_t gtt_page_base, offset;
 659        loff_t first_data_page, last_data_page, num_pages;
 660        loff_t pinned_pages, i;
 661        struct page **user_pages;
 662        struct mm_struct *mm = current->mm;
 663        int gtt_page_offset, data_page_offset, data_page_index, page_length;
 664        int ret;
 665        uint64_t data_ptr = args->data_ptr;
 666
 667        remain = args->size;
 668
 669        /* Pin the user pages containing the data.  We can't fault while
 670         * holding the struct mutex, and all of the pwrite implementations
 671         * want to hold it while dereferencing the user data.
 672         */
 673        first_data_page = data_ptr / PAGE_SIZE;
 674        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 675        num_pages = last_data_page - first_data_page + 1;
 676
 677        user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 678        if (user_pages == NULL)
 679                return -ENOMEM;
 680
 681        mutex_unlock(&dev->struct_mutex);
 682        down_read(&mm->mmap_sem);
 683        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 684                                      num_pages, 0, 0, user_pages, NULL);
 685        up_read(&mm->mmap_sem);
 686        mutex_lock(&dev->struct_mutex);
 687        if (pinned_pages < num_pages) {
 688                ret = -EFAULT;
 689                goto out_unpin_pages;
 690        }
 691
 692        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 693        if (ret)
 694                goto out_unpin_pages;
 695
 696        ret = i915_gem_object_put_fence(obj);
 697        if (ret)
 698                goto out_unpin_pages;
 699
 700        offset = obj->gtt_offset + args->offset;
 701
 702        while (remain > 0) {
 703                /* Operation in this page
 704                 *
 705                 * gtt_page_base = page offset within aperture
 706                 * gtt_page_offset = offset within page in aperture
 707                 * data_page_index = page number in get_user_pages return
  708                 * data_page_offset = offset within the data_page_index page.
 709                 * page_length = bytes to copy for this page
 710                 */
 711                gtt_page_base = offset & PAGE_MASK;
 712                gtt_page_offset = offset & ~PAGE_MASK;
 713                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 714                data_page_offset = data_ptr & ~PAGE_MASK;
 715
 716                page_length = remain;
 717                if ((gtt_page_offset + page_length) > PAGE_SIZE)
 718                        page_length = PAGE_SIZE - gtt_page_offset;
 719                if ((data_page_offset + page_length) > PAGE_SIZE)
 720                        page_length = PAGE_SIZE - data_page_offset;
 721
 722                slow_kernel_write(dev_priv->mm.gtt_mapping,
 723                                  gtt_page_base, gtt_page_offset,
 724                                  user_pages[data_page_index],
 725                                  data_page_offset,
 726                                  page_length);
 727
 728                remain -= page_length;
 729                offset += page_length;
 730                data_ptr += page_length;
 731        }
 732
 733out_unpin_pages:
 734        for (i = 0; i < pinned_pages; i++)
 735                page_cache_release(user_pages[i]);
 736        drm_free_large(user_pages);
 737
 738        return ret;
 739}
 740
 741/**
 742 * This is the fast shmem pwrite path, which attempts to directly
 743 * copy_from_user into the kmapped pages backing the object.
 744 */
 745static int
 746i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 747                           struct drm_i915_gem_object *obj,
 748                           struct drm_i915_gem_pwrite *args,
 749                           struct drm_file *file)
 750{
 751        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 752        ssize_t remain;
 753        loff_t offset;
 754        char __user *user_data;
 755        int page_offset, page_length;
 756
 757        user_data = (char __user *) (uintptr_t) args->data_ptr;
 758        remain = args->size;
 759
 760        offset = args->offset;
 761        obj->dirty = 1;
 762
 763        while (remain > 0) {
 764                struct page *page;
 765                char *vaddr;
 766                int ret;
 767
 768                /* Operation in this page
 769                 *
 770                 * page_offset = offset within page
 771                 * page_length = bytes to copy for this page
 772                 */
 773                page_offset = offset & (PAGE_SIZE-1);
 774                page_length = remain;
 775                if ((page_offset + remain) > PAGE_SIZE)
 776                        page_length = PAGE_SIZE - page_offset;
 777
 778                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
 779                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
 780                if (IS_ERR(page))
 781                        return PTR_ERR(page);
 782
  783                vaddr = kmap_atomic(page);
 784                ret = __copy_from_user_inatomic(vaddr + page_offset,
 785                                                user_data,
 786                                                page_length);
  787                kunmap_atomic(vaddr);
 788
 789                set_page_dirty(page);
 790                mark_page_accessed(page);
 791                page_cache_release(page);
 792
 793                /* If we get a fault while copying data, then (presumably) our
 794                 * source page isn't available.  Return the error and we'll
 795                 * retry in the slow path.
 796                 */
 797                if (ret)
 798                        return -EFAULT;
 799
 800                remain -= page_length;
 801                user_data += page_length;
 802                offset += page_length;
 803        }
 804
 805        return 0;
 806}
 807
 808/**
 809 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
  810 * the memory and maps it using kmap for copying.
 811 *
 812 * This avoids taking mmap_sem for faulting on the user's address while the
 813 * struct_mutex is held.
 814 */
 815static int
 816i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 817                           struct drm_i915_gem_object *obj,
 818                           struct drm_i915_gem_pwrite *args,
 819                           struct drm_file *file)
 820{
 821        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 822        struct mm_struct *mm = current->mm;
 823        struct page **user_pages;
 824        ssize_t remain;
 825        loff_t offset, pinned_pages, i;
 826        loff_t first_data_page, last_data_page, num_pages;
 827        int shmem_page_offset;
  828        int data_page_index, data_page_offset;
 829        int page_length;
 830        int ret;
 831        uint64_t data_ptr = args->data_ptr;
 832        int do_bit17_swizzling;
 833
 834        remain = args->size;
 835
 836        /* Pin the user pages containing the data.  We can't fault while
 837         * holding the struct mutex, and all of the pwrite implementations
 838         * want to hold it while dereferencing the user data.
 839         */
 840        first_data_page = data_ptr / PAGE_SIZE;
 841        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 842        num_pages = last_data_page - first_data_page + 1;
 843
 844        user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 845        if (user_pages == NULL)
 846                return -ENOMEM;
 847
 848        mutex_unlock(&dev->struct_mutex);
 849        down_read(&mm->mmap_sem);
 850        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 851                                      num_pages, 0, 0, user_pages, NULL);
 852        up_read(&mm->mmap_sem);
 853        mutex_lock(&dev->struct_mutex);
 854        if (pinned_pages < num_pages) {
 855                ret = -EFAULT;
 856                goto out;
 857        }
 858
 859        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 860        if (ret)
 861                goto out;
 862
 863        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 864
 865        offset = args->offset;
 866        obj->dirty = 1;
 867
 868        while (remain > 0) {
 869                struct page *page;
 870
 871                /* Operation in this page
 872                 *
 873                 * shmem_page_offset = offset within page in shmem file
 874                 * data_page_index = page number in get_user_pages return
  875                 * data_page_offset = offset within the data_page_index page.
 876                 * page_length = bytes to copy for this page
 877                 */
 878                shmem_page_offset = offset & ~PAGE_MASK;
 879                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 880                data_page_offset = data_ptr & ~PAGE_MASK;
 881
 882                page_length = remain;
 883                if ((shmem_page_offset + page_length) > PAGE_SIZE)
 884                        page_length = PAGE_SIZE - shmem_page_offset;
 885                if ((data_page_offset + page_length) > PAGE_SIZE)
 886                        page_length = PAGE_SIZE - data_page_offset;
 887
 888                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
 889                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
 890                if (IS_ERR(page)) {
 891                        ret = PTR_ERR(page);
 892                        goto out;
 893                }
 894
 895                if (do_bit17_swizzling) {
 896                        slow_shmem_bit17_copy(page,
 897                                              shmem_page_offset,
 898                                              user_pages[data_page_index],
 899                                              data_page_offset,
 900                                              page_length,
 901                                              0);
 902                } else {
 903                        slow_shmem_copy(page,
 904                                        shmem_page_offset,
 905                                        user_pages[data_page_index],
 906                                        data_page_offset,
 907                                        page_length);
 908                }
 909
 910                set_page_dirty(page);
 911                mark_page_accessed(page);
 912                page_cache_release(page);
 913
 914                remain -= page_length;
 915                data_ptr += page_length;
 916                offset += page_length;
 917        }
 918
 919out:
 920        for (i = 0; i < pinned_pages; i++)
 921                page_cache_release(user_pages[i]);
 922        drm_free_large(user_pages);
 923
 924        return ret;
 925}
 926
 927/**
 928 * Writes data to the object referenced by handle.
 929 *
 930 * On error, the contents of the buffer that were to be modified are undefined.
 931 */
 932int
 933i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 934                      struct drm_file *file)
 935{
 936        struct drm_i915_gem_pwrite *args = data;
 937        struct drm_i915_gem_object *obj;
 938        int ret;
 939
 940        if (args->size == 0)
 941                return 0;
 942
 943        if (!access_ok(VERIFY_READ,
 944                       (char __user *)(uintptr_t)args->data_ptr,
 945                       args->size))
 946                return -EFAULT;
 947
 948        ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
 949                                      args->size);
 950        if (ret)
 951                return -EFAULT;
 952
 953        ret = i915_mutex_lock_interruptible(dev);
 954        if (ret)
 955                return ret;
 956
 957        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 958        if (obj == NULL) {
 959                ret = -ENOENT;
 960                goto unlock;
 961        }
 962
 963        /* Bounds check destination. */
 964        if (args->offset > obj->base.size ||
 965            args->size > obj->base.size - args->offset) {
 966                ret = -EINVAL;
 967                goto out;
 968        }
 969
 970        /* We can only do the GTT pwrite on untiled buffers, as otherwise
 971         * it would end up going through the fenced access, and we'll get
 972         * different detiling behavior between reading and writing.
 973         * pread/pwrite currently are reading and writing from the CPU
 974         * perspective, requiring manual detiling by the client.
 975         */
 976        if (obj->phys_obj)
 977                ret = i915_gem_phys_pwrite(dev, obj, args, file);
 978        else if (obj->gtt_space &&
 979                 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 980                ret = i915_gem_object_pin(obj, 0, true);
 981                if (ret)
 982                        goto out;
 983
 984                ret = i915_gem_object_set_to_gtt_domain(obj, true);
 985                if (ret)
 986                        goto out_unpin;
 987
 988                ret = i915_gem_object_put_fence(obj);
 989                if (ret)
 990                        goto out_unpin;
 991
 992                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 993                if (ret == -EFAULT)
 994                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
 995
 996out_unpin:
 997                i915_gem_object_unpin(obj);
 998        } else {
 999                ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1000                if (ret)
1001                        goto out;
1002
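                     /* As with pread: skip the fast path for bit-17 swizzled
                      * objects, and fall back to the slow path whenever the
                      * fast copy would have had to fault in user pages.
                      */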
1003                ret = -EFAULT;
1004                if (!i915_gem_object_needs_bit17_swizzle(obj))
1005                        ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1006                if (ret == -EFAULT)
1007                        ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1008        }
1009
1010out:
1011        drm_gem_object_unreference(&obj->base);
1012unlock:
1013        mutex_unlock(&dev->struct_mutex);
1014        return ret;
1015}
1016
1017/**
1018 * Called when user space prepares to use an object with the CPU, either
1019 * through the mmap ioctl's mapping or a GTT mapping.
1020 */
1021int
1022i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1023                          struct drm_file *file)
1024{
1025        struct drm_i915_gem_set_domain *args = data;
1026        struct drm_i915_gem_object *obj;
1027        uint32_t read_domains = args->read_domains;
1028        uint32_t write_domain = args->write_domain;
1029        int ret;
1030
1031        if (!(dev->driver->driver_features & DRIVER_GEM))
1032                return -ENODEV;
1033
1034        /* Only handle setting domains to types used by the CPU. */
1035        if (write_domain & I915_GEM_GPU_DOMAINS)
1036                return -EINVAL;
1037
1038        if (read_domains & I915_GEM_GPU_DOMAINS)
1039                return -EINVAL;
1040
1041        /* Having something in the write domain implies it's in the read
1042         * domain, and only that read domain.  Enforce that in the request.
1043         */
1044        if (write_domain != 0 && read_domains != write_domain)
1045                return -EINVAL;
1046
1047        ret = i915_mutex_lock_interruptible(dev);
1048        if (ret)
1049                return ret;
1050
1051        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1052        if (obj == NULL) {
1053                ret = -ENOENT;
1054                goto unlock;
1055        }
1056
1057        if (read_domains & I915_GEM_DOMAIN_GTT) {
1058                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1059
1060                /* Silently promote "you're not bound, there was nothing to do"
1061                 * to success, since the client was just asking us to
1062                 * make sure everything was done.
1063                 */
1064                if (ret == -EINVAL)
1065                        ret = 0;
1066        } else {
1067                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1068        }
1069
1070        drm_gem_object_unreference(&obj->base);
1071unlock:
1072        mutex_unlock(&dev->struct_mutex);
1073        return ret;
1074}
1075
1076/**
1077 * Called when user space has done writes to this buffer
1078 */
1079int
1080i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1081                         struct drm_file *file)
1082{
1083        struct drm_i915_gem_sw_finish *args = data;
1084        struct drm_i915_gem_object *obj;
1085        int ret = 0;
1086
1087        if (!(dev->driver->driver_features & DRIVER_GEM))
1088                return -ENODEV;
1089
1090        ret = i915_mutex_lock_interruptible(dev);
1091        if (ret)
1092                return ret;
1093
1094        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1095        if (obj == NULL) {
1096                ret = -ENOENT;
1097                goto unlock;
1098        }
1099
1100        /* Pinned buffers may be scanout, so flush the cache */
1101        if (obj->pin_count)
1102                i915_gem_object_flush_cpu_write_domain(obj);
1103
1104        drm_gem_object_unreference(&obj->base);
1105unlock:
1106        mutex_unlock(&dev->struct_mutex);
1107        return ret;
1108}
1109
1110/**
1111 * Maps the contents of an object, returning the address it is mapped
1112 * into.
1113 *
1114 * While the mapping holds a reference on the contents of the object, it doesn't
1115 * imply a ref on the object itself.
1116 */
1117int
1118i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1119                    struct drm_file *file)
1120{
1121        struct drm_i915_private *dev_priv = dev->dev_private;
1122        struct drm_i915_gem_mmap *args = data;
1123        struct drm_gem_object *obj;
1124        loff_t offset;
1125        unsigned long addr;
1126
1127        if (!(dev->driver->driver_features & DRIVER_GEM))
1128                return -ENODEV;
1129
1130        obj = drm_gem_object_lookup(dev, file, args->handle);
1131        if (obj == NULL)
1132                return -ENOENT;
1133
1134        if (obj->size > dev_priv->mm.gtt_mappable_end) {
1135                drm_gem_object_unreference_unlocked(obj);
1136                return -E2BIG;
1137        }
1138
1139        offset = args->offset;
1140
1141        down_write(&current->mm->mmap_sem);
1142        addr = do_mmap(obj->filp, 0, args->size,
1143                       PROT_READ | PROT_WRITE, MAP_SHARED,
1144                       args->offset);
1145        up_write(&current->mm->mmap_sem);
1146        drm_gem_object_unreference_unlocked(obj);
1147        if (IS_ERR((void *)addr))
1148                return addr;
1149
1150        args->addr_ptr = (uint64_t) addr;
1151
1152        return 0;
1153}
1154
1155/**
1156 * i915_gem_fault - fault a page into the GTT
 1157 * @vma: VMA in question
 1158 * @vmf: fault info
1159 *
 1160 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1161 * from userspace.  The fault handler takes care of binding the object to
1162 * the GTT (if needed), allocating and programming a fence register (again,
1163 * only if needed based on whether the old reg is still valid or the object
1164 * is tiled) and inserting a new PTE into the faulting process.
1165 *
1166 * Note that the faulting process may involve evicting existing objects
1167 * from the GTT and/or fence registers to make room.  So performance may
1168 * suffer if the GTT working set is large or there are few fence registers
1169 * left.
1170 */
1171int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1172{
1173        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1174        struct drm_device *dev = obj->base.dev;
1175        drm_i915_private_t *dev_priv = dev->dev_private;
1176        pgoff_t page_offset;
1177        unsigned long pfn;
1178        int ret = 0;
1179        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1180
1181        /* We don't use vmf->pgoff since that has the fake offset */
1182        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1183                PAGE_SHIFT;
1184
1185        /* Now bind it into the GTT if needed */
1186        mutex_lock(&dev->struct_mutex);
1187
1188        if (!obj->map_and_fenceable) {
1189                ret = i915_gem_object_unbind(obj);
1190                if (ret)
1191                        goto unlock;
1192        }
1193        if (!obj->gtt_space) {
1194                ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1195                if (ret)
1196                        goto unlock;
1197        }
1198
1199        ret = i915_gem_object_set_to_gtt_domain(obj, write);
1200        if (ret)
1201                goto unlock;
1202
1203        if (obj->tiling_mode == I915_TILING_NONE)
1204                ret = i915_gem_object_put_fence(obj);
1205        else
1206                ret = i915_gem_object_get_fence(obj, NULL, true);
1207        if (ret)
1208                goto unlock;
1209
1210        if (i915_gem_object_is_inactive(obj))
1211                list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1212
1213        obj->fault_mappable = true;
1214
1215        pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1216                page_offset;
1217
1218        /* Finally, remap it using the new GTT offset */
1219        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1220unlock:
1221        mutex_unlock(&dev->struct_mutex);
1222
1223        switch (ret) {
1224        case -EAGAIN:
1225                set_need_resched();
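                     /* fall through */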
1226        case 0:
1227        case -ERESTARTSYS:
1228                return VM_FAULT_NOPAGE;
1229        case -ENOMEM:
1230                return VM_FAULT_OOM;
1231        default:
1232                return VM_FAULT_SIGBUS;
1233        }
1234}
1235
1236/**
1237 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1238 * @obj: obj in question
1239 *
1240 * GEM memory mapping works by handing back to userspace a fake mmap offset
1241 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1242 * up the object based on the offset and sets up the various memory mapping
1243 * structures.
1244 *
1245 * This routine allocates and attaches a fake offset for @obj.
1246 */
1247static int
1248i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1249{
1250        struct drm_device *dev = obj->base.dev;
1251        struct drm_gem_mm *mm = dev->mm_private;
1252        struct drm_map_list *list;
1253        struct drm_local_map *map;
1254        int ret = 0;
1255
1256        /* Set the object up for mmap'ing */
1257        list = &obj->base.map_list;
1258        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1259        if (!list->map)
1260                return -ENOMEM;
1261
1262        map = list->map;
1263        map->type = _DRM_GEM;
1264        map->size = obj->base.size;
1265        map->handle = obj;
1266
1267        /* Get a DRM GEM mmap offset allocated... */
1268        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1269                                                    obj->base.size / PAGE_SIZE,
1270                                                    0, 0);
1271        if (!list->file_offset_node) {
1272                DRM_ERROR("failed to allocate offset for bo %d\n",
1273                          obj->base.name);
1274                ret = -ENOSPC;
1275                goto out_free_list;
1276        }
1277
1278        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1279                                                  obj->base.size / PAGE_SIZE,
1280                                                  0);
1281        if (!list->file_offset_node) {
1282                ret = -ENOMEM;
1283                goto out_free_list;
1284        }
1285
1286        list->hash.key = list->file_offset_node->start;
1287        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1288        if (ret) {
1289                DRM_ERROR("failed to add to map hash\n");
1290                goto out_free_mm;
1291        }
1292
1293        return 0;
1294
1295out_free_mm:
1296        drm_mm_put_block(list->file_offset_node);
1297out_free_list:
1298        kfree(list->map);
1299        list->map = NULL;
1300
1301        return ret;
1302}
1303
1304/**
1305 * i915_gem_release_mmap - remove physical page mappings
1306 * @obj: obj in question
1307 *
1308 * Preserve the reservation of the mmapping with the DRM core code, but
1309 * relinquish ownership of the pages back to the system.
1310 *
1311 * It is vital that we remove the page mapping if we have mapped a tiled
1312 * object through the GTT and then lose the fence register due to
1313 * resource pressure. Similarly if the object has been moved out of the
 1314 * aperture, then pages mapped into userspace must be revoked. Removing the
1315 * mapping will then trigger a page fault on the next user access, allowing
1316 * fixup by i915_gem_fault().
1317 */
1318void
1319i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1320{
1321        if (!obj->fault_mappable)
1322                return;
1323
1324        unmap_mapping_range(obj->base.dev->dev_mapping,
1325                            (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1326                            obj->base.size, 1);
1327
1328        obj->fault_mappable = false;
1329}
1330
1331static void
1332i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1333{
1334        struct drm_device *dev = obj->base.dev;
1335        struct drm_gem_mm *mm = dev->mm_private;
1336        struct drm_map_list *list = &obj->base.map_list;
1337
1338        drm_ht_remove_item(&mm->offset_hash, &list->hash);
1339        drm_mm_put_block(list->file_offset_node);
1340        kfree(list->map);
1341        list->map = NULL;
1342}
1343
1344static uint32_t
1345i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1346{
1347        struct drm_device *dev = obj->base.dev;
1348        uint32_t size;
1349
1350        if (INTEL_INFO(dev)->gen >= 4 ||
1351            obj->tiling_mode == I915_TILING_NONE)
1352                return obj->base.size;
1353
1354        /* Previous chips need a power-of-two fence region when tiling */
1355        if (INTEL_INFO(dev)->gen == 3)
1356                size = 1024*1024;
1357        else
1358                size = 512*1024;
1359
1360        while (size < obj->base.size)
1361                size <<= 1;
1362
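             /* e.g. a 600 KiB tiled object gets a 1 MiB fence region on either
              * path: gen3 starts at 1 MiB, gen2 doubles 512 KiB once.
              */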
1363        return size;
1364}
1365
1366/**
1367 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1368 * @obj: object to check
1369 *
1370 * Return the required GTT alignment for an object, taking into account
1371 * potential fence register mapping.
1372 */
1373static uint32_t
1374i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1375{
1376        struct drm_device *dev = obj->base.dev;
1377
1378        /*
1379         * Minimum alignment is 4k (GTT page size), but might be greater
1380         * if a fence register is needed for the object.
1381         */
1382        if (INTEL_INFO(dev)->gen >= 4 ||
1383            obj->tiling_mode == I915_TILING_NONE)
1384                return 4096;
1385
1386        /*
1387         * Previous chips need to be aligned to the size of the smallest
1388         * fence register that can contain the object.
1389         */
1390        return i915_gem_get_gtt_size(obj);
1391}
1392
1393/**
1394 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1395 *                                       unfenced object
1396 * @obj: object to check
1397 *
1398 * Return the required GTT alignment for an object, only taking into account
1399 * unfenced tiled surface requirements.
1400 */
1401uint32_t
1402i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1403{
1404        struct drm_device *dev = obj->base.dev;
1405        int tile_height;
1406
1407        /*
1408         * Minimum alignment is 4k (GTT page size) for sane hw.
1409         */
1410        if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1411            obj->tiling_mode == I915_TILING_NONE)
1412                return 4096;
1413
1414        /*
1415         * Older chips need unfenced tiled buffers to be aligned to the left
1416         * edge of an even tile row (where tile rows are counted as if the bo is
1417         * placed in a fenced gtt region).
1418         */
1419        if (IS_GEN2(dev) ||
1420            (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1421                tile_height = 32;
1422        else
1423                tile_height = 8;
1424
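             /* e.g. tile_height = 32 with a 4096-byte stride gives
              * 32 * 4096 * 2 = 256 KiB alignment.
              */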
1425        return tile_height * obj->stride * 2;
1426}
1427
1428/**
1429 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1430 * @dev: DRM device
1431 * @data: GTT mapping ioctl data
1432 * @file: GEM object info
1433 *
1434 * Simply returns the fake offset to userspace so it can mmap it.
1435 * The mmap call will end up in drm_gem_mmap(), which will set things
1436 * up so we can get faults in the handler above.
1437 *
1438 * The fault handler will take care of binding the object into the GTT
1439 * (since it may have been evicted to make room for something), allocating
1440 * a fence register, and mapping the appropriate aperture address into
1441 * userspace.
1442 */
1443int
1444i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1445                        struct drm_file *file)
1446{
1447        struct drm_i915_private *dev_priv = dev->dev_private;
1448        struct drm_i915_gem_mmap_gtt *args = data;
1449        struct drm_i915_gem_object *obj;
1450        int ret;
1451
1452        if (!(dev->driver->driver_features & DRIVER_GEM))
1453                return -ENODEV;
1454
1455        ret = i915_mutex_lock_interruptible(dev);
1456        if (ret)
1457                return ret;
1458
1459        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1460        if (obj == NULL) {
1461                ret = -ENOENT;
1462                goto unlock;
1463        }
1464
1465        if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1466                ret = -E2BIG;
1467                goto unlock;
1468        }
1469
1470        if (obj->madv != I915_MADV_WILLNEED) {
1471                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1472                ret = -EINVAL;
1473                goto out;
1474        }
1475
1476        if (!obj->base.map_list.map) {
1477                ret = i915_gem_create_mmap_offset(obj);
1478                if (ret)
1479                        goto out;
1480        }
1481
1482        args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1483
1484out:
1485        drm_gem_object_unreference(&obj->base);
1486unlock:
1487        mutex_unlock(&dev->struct_mutex);
1488        return ret;
1489}
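     /*
      * For reference, this flow is typically driven from userspace roughly as
      * follows (illustrative only; the ioctl and struct come from i915_drm.h):
      *
      *   struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
      *   ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
      *   ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
      *              fd, arg.offset);
      *
      * Subsequent accesses through ptr fault into i915_gem_fault() above.
      */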
1490
1491static int
1492i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1493                              gfp_t gfpmask)
1494{
1495        int page_count, i;
1496        struct address_space *mapping;
1497        struct inode *inode;
1498        struct page *page;
1499
1500        /* Get the list of pages out of our struct file.  They'll be pinned
1501         * at this point until we release them.
1502         */
1503        page_count = obj->base.size / PAGE_SIZE;
1504        BUG_ON(obj->pages != NULL);
1505        obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1506        if (obj->pages == NULL)
1507                return -ENOMEM;
1508
1509        inode = obj->base.filp->f_path.dentry->d_inode;
1510        mapping = inode->i_mapping;
1511        for (i = 0; i < page_count; i++) {
1512                page = read_cache_page_gfp(mapping, i,
1513                                           GFP_HIGHUSER |
1514                                           __GFP_COLD |
1515                                           __GFP_RECLAIMABLE |
1516                                           gfpmask);
1517                if (IS_ERR(page))
1518                        goto err_pages;
1519
1520                obj->pages[i] = page;
1521        }
1522
1523        if (obj->tiling_mode != I915_TILING_NONE)
1524                i915_gem_object_do_bit_17_swizzle(obj);
1525
1526        return 0;
1527
1528err_pages:
1529        while (i--)
1530                page_cache_release(obj->pages[i]);
1531
1532        drm_free_large(obj->pages);
1533        obj->pages = NULL;
1534        return PTR_ERR(page);
1535}
1536
1537static void
1538i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1539{
1540        int page_count = obj->base.size / PAGE_SIZE;
1541        int i;
1542
1543        BUG_ON(obj->madv == __I915_MADV_PURGED);
1544
1545        if (obj->tiling_mode != I915_TILING_NONE)
1546                i915_gem_object_save_bit_17_swizzle(obj);
1547
1548        if (obj->madv == I915_MADV_DONTNEED)
1549                obj->dirty = 0;
1550
1551        for (i = 0; i < page_count; i++) {
1552                if (obj->dirty)
1553                        set_page_dirty(obj->pages[i]);
1554
1555                if (obj->madv == I915_MADV_WILLNEED)
1556                        mark_page_accessed(obj->pages[i]);
1557
1558                page_cache_release(obj->pages[i]);
1559        }
1560        obj->dirty = 0;
1561
1562        drm_free_large(obj->pages);
1563        obj->pages = NULL;
1564}
1565
1566void
1567i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1568                               struct intel_ring_buffer *ring,
1569                               u32 seqno)
1570{
1571        struct drm_device *dev = obj->base.dev;
1572        struct drm_i915_private *dev_priv = dev->dev_private;
1573
1574        BUG_ON(ring == NULL);
1575        obj->ring = ring;
1576
1577        /* Add a reference if we're newly entering the active list. */
1578        if (!obj->active) {
1579                drm_gem_object_reference(&obj->base);
1580                obj->active = 1;
1581        }
1582
1583        /* Move from whatever list we were on to the tail of execution. */
1584        list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1585        list_move_tail(&obj->ring_list, &ring->active_list);
1586
1587        obj->last_rendering_seqno = seqno;
1588        if (obj->fenced_gpu_access) {
1589                struct drm_i915_fence_reg *reg;
1590
1591                BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1592
1593                obj->last_fenced_seqno = seqno;
1594                obj->last_fenced_ring = ring;
1595
1596                reg = &dev_priv->fence_regs[obj->fence_reg];
1597                list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1598        }
1599}
1600
1601static void
1602i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1603{
1604        list_del_init(&obj->ring_list);
1605        obj->last_rendering_seqno = 0;
1606}
1607
1608static void
1609i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1610{
1611        struct drm_device *dev = obj->base.dev;
1612        drm_i915_private_t *dev_priv = dev->dev_private;
1613
1614        BUG_ON(!obj->active);
1615        list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1616
1617        i915_gem_object_move_off_active(obj);
1618}
1619
1620static void
1621i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1622{
1623        struct drm_device *dev = obj->base.dev;
1624        struct drm_i915_private *dev_priv = dev->dev_private;
1625
1626        if (obj->pin_count != 0)
1627                list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1628        else
1629                list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1630
1631        BUG_ON(!list_empty(&obj->gpu_write_list));
1632        BUG_ON(!obj->active);
1633        obj->ring = NULL;
1634
1635        i915_gem_object_move_off_active(obj);
1636        obj->fenced_gpu_access = false;
1637
1638        obj->active = 0;
1639        obj->pending_gpu_write = false;
1640        drm_gem_object_unreference(&obj->base);
1641
1642        WARN_ON(i915_verify_lists(dev));
1643}
1644
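/*
 * Object lifecycle implemented by the helpers above, summarised as a
 * descriptive sketch of this file's own state machine (no new behaviour):
 *
 *	inactive --(submitted on a ring)--> active
 *	active --(retired while its write domain is still dirty)--> flushing
 *	flushing --(flush emitted)--> active, with the flush's seqno
 *	active --(retired with no dirty write domain)--> inactive (or pinned)
 *
 * Entering the active list takes an extra object reference, which
 * i915_gem_object_move_to_inactive() drops again once the GPU is done
 * with the buffer.
 */
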
1645/* Immediately discard the backing storage */
1646static void
1647i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1648{
1649        struct inode *inode;
1650
1651        /* Our goal here is to return as much of the memory as
1652         * possible back to the system, since we may be called from
1653         * the OOM path. To do this we must instruct the shmfs to drop
1654         * all of its backing pages, *now*. Here we mirror the actions
1655         * taken by shmem_delete_inode() to release the backing store.
1656         */
1657        inode = obj->base.filp->f_path.dentry->d_inode;
1658        truncate_inode_pages(inode->i_mapping, 0);
1659        if (inode->i_op->truncate_range)
1660                inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1661
1662        obj->madv = __I915_MADV_PURGED;
1663}
1664
1665static inline int
1666i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1667{
1668        return obj->madv == I915_MADV_DONTNEED;
1669}
1670
1671static void
1672i915_gem_process_flushing_list(struct drm_device *dev,
1673                               uint32_t flush_domains,
1674                               struct intel_ring_buffer *ring)
1675{
1676        struct drm_i915_gem_object *obj, *next;
1677
1678        list_for_each_entry_safe(obj, next,
1679                                 &ring->gpu_write_list,
1680                                 gpu_write_list) {
1681                if (obj->base.write_domain & flush_domains) {
1682                        uint32_t old_write_domain = obj->base.write_domain;
1683
1684                        obj->base.write_domain = 0;
1685                        list_del_init(&obj->gpu_write_list);
1686                        i915_gem_object_move_to_active(obj, ring,
1687                                                       i915_gem_next_request_seqno(dev, ring));
1688
1689                        trace_i915_gem_object_change_domain(obj,
1690                                                            obj->base.read_domains,
1691                                                            old_write_domain);
1692                }
1693        }
1694}
1695
1696int
1697i915_add_request(struct drm_device *dev,
1698                 struct drm_file *file,
1699                 struct drm_i915_gem_request *request,
1700                 struct intel_ring_buffer *ring)
1701{
1702        drm_i915_private_t *dev_priv = dev->dev_private;
1703        struct drm_i915_file_private *file_priv = NULL;
1704        uint32_t seqno;
1705        int was_empty;
1706        int ret;
1707
1708        BUG_ON(request == NULL);
1709
1710        if (file != NULL)
1711                file_priv = file->driver_priv;
1712
1713        ret = ring->add_request(ring, &seqno);
1714        if (ret)
1715                return ret;
1716
1717        ring->outstanding_lazy_request = false;
1718
1719        request->seqno = seqno;
1720        request->ring = ring;
1721        request->emitted_jiffies = jiffies;
1722        was_empty = list_empty(&ring->request_list);
1723        list_add_tail(&request->list, &ring->request_list);
1724
1725        if (file_priv) {
1726                spin_lock(&file_priv->mm.lock);
1727                request->file_priv = file_priv;
1728                list_add_tail(&request->client_list,
1729                              &file_priv->mm.request_list);
1730                spin_unlock(&file_priv->mm.lock);
1731        }
1732
1733        if (!dev_priv->mm.suspended) {
1734                mod_timer(&dev_priv->hangcheck_timer,
1735                          jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1736                if (was_empty)
1737                        queue_delayed_work(dev_priv->wq,
1738                                           &dev_priv->mm.retire_work, HZ);
1739        }
1740        return 0;
1741}
1742
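/*
 * Typical caller pattern (a sketch based on users in this file such as
 * i915_gem_retire_work_handler() and i915_do_wait_request(); error
 * handling abbreviated):
 *
 *	request = kzalloc(sizeof(*request), GFP_KERNEL);
 *	if (request == NULL)
 *		return -ENOMEM;
 *	ret = i915_add_request(dev, NULL, request, ring);
 *	if (ret)
 *		kfree(request);
 *
 * Ownership of the request passes to the ring's request_list only on
 * success; the retire path frees it once its seqno has passed.
 */
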
1743static inline void
1744i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1745{
1746        struct drm_i915_file_private *file_priv = request->file_priv;
1747
1748        if (!file_priv)
1749                return;
1750
1751        spin_lock(&file_priv->mm.lock);
1752        list_del(&request->client_list);
1753        request->file_priv = NULL;
1754        spin_unlock(&file_priv->mm.lock);
1755}
1756
1757static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1758                                      struct intel_ring_buffer *ring)
1759{
1760        while (!list_empty(&ring->request_list)) {
1761                struct drm_i915_gem_request *request;
1762
1763                request = list_first_entry(&ring->request_list,
1764                                           struct drm_i915_gem_request,
1765                                           list);
1766
1767                list_del(&request->list);
1768                i915_gem_request_remove_from_client(request);
1769                kfree(request);
1770        }
1771
1772        while (!list_empty(&ring->active_list)) {
1773                struct drm_i915_gem_object *obj;
1774
1775                obj = list_first_entry(&ring->active_list,
1776                                       struct drm_i915_gem_object,
1777                                       ring_list);
1778
1779                obj->base.write_domain = 0;
1780                list_del_init(&obj->gpu_write_list);
1781                i915_gem_object_move_to_inactive(obj);
1782        }
1783}
1784
1785static void i915_gem_reset_fences(struct drm_device *dev)
1786{
1787        struct drm_i915_private *dev_priv = dev->dev_private;
1788        int i;
1789
1790        for (i = 0; i < dev_priv->num_fence_regs; i++) {
1791                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1792                struct drm_i915_gem_object *obj = reg->obj;
1793
1794                if (!obj)
1795                        continue;
1796
1797                if (obj->tiling_mode)
1798                        i915_gem_release_mmap(obj);
1799
1800                reg->obj->fence_reg = I915_FENCE_REG_NONE;
1801                reg->obj->fenced_gpu_access = false;
1802                reg->obj->last_fenced_seqno = 0;
1803                reg->obj->last_fenced_ring = NULL;
1804                i915_gem_clear_fence_reg(dev, reg);
1805        }
1806}
1807
1808void i915_gem_reset(struct drm_device *dev)
1809{
1810        struct drm_i915_private *dev_priv = dev->dev_private;
1811        struct drm_i915_gem_object *obj;
1812        int i;
1813
1814        for (i = 0; i < I915_NUM_RINGS; i++)
1815                i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1816
1817        /* Remove anything from the flushing lists. The GPU cache is likely
1818         * to be lost on reset along with the data, so simply move the
1819         * lost bo to the inactive list.
1820         */
1821        while (!list_empty(&dev_priv->mm.flushing_list)) {
1822                obj = list_first_entry(&dev_priv->mm.flushing_list,
1823                                       struct drm_i915_gem_object,
1824                                       mm_list);
1825
1826                obj->base.write_domain = 0;
1827                list_del_init(&obj->gpu_write_list);
1828                i915_gem_object_move_to_inactive(obj);
1829        }
1830
1831        /* Move everything out of the GPU domains to ensure we do any
1832         * necessary invalidation upon reuse.
1833         */
1834        list_for_each_entry(obj,
1835                            &dev_priv->mm.inactive_list,
1836                            mm_list)
1837        {
1838                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1839        }
1840
1841        /* The fence registers are invalidated so clear them out */
1842        i915_gem_reset_fences(dev);
1843}
1844
1845/**
1846 * This function clears the request list as sequence numbers are passed.
1847 */
1848static void
1849i915_gem_retire_requests_ring(struct drm_device *dev,
1850                              struct intel_ring_buffer *ring)
1851{
1852        drm_i915_private_t *dev_priv = dev->dev_private;
1853        uint32_t seqno;
1854        int i;
1855
1856        if (!ring->status_page.page_addr ||
1857            list_empty(&ring->request_list))
1858                return;
1859
1860        WARN_ON(i915_verify_lists(dev));
1861
1862        seqno = ring->get_seqno(ring);
1863
1864        for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1865                if (seqno >= ring->sync_seqno[i])
1866                        ring->sync_seqno[i] = 0;
1867
1868        while (!list_empty(&ring->request_list)) {
1869                struct drm_i915_gem_request *request;
1870
1871                request = list_first_entry(&ring->request_list,
1872                                           struct drm_i915_gem_request,
1873                                           list);
1874
1875                if (!i915_seqno_passed(seqno, request->seqno))
1876                        break;
1877
1878                trace_i915_gem_request_retire(dev, request->seqno);
1879
1880                list_del(&request->list);
1881                i915_gem_request_remove_from_client(request);
1882                kfree(request);
1883        }
1884
1885        /* Move any buffers on the active list that are no longer referenced
1886         * by the ringbuffer to the flushing/inactive lists as appropriate.
1887         */
1888        while (!list_empty(&ring->active_list)) {
1889                struct drm_i915_gem_object *obj;
1890
1891                obj = list_first_entry(&ring->active_list,
1892                                       struct drm_i915_gem_object,
1893                                       ring_list);
1894
1895                if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1896                        break;
1897
1898                if (obj->base.write_domain != 0)
1899                        i915_gem_object_move_to_flushing(obj);
1900                else
1901                        i915_gem_object_move_to_inactive(obj);
1902        }
1903
1904        if (unlikely(dev_priv->trace_irq_seqno &&
1905                     i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1906                ring->irq_put(ring);
1907                dev_priv->trace_irq_seqno = 0;
1908        }
1909
1910        WARN_ON(i915_verify_lists(dev));
1911}
1912
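/*
 * Note on the seqno comparison used above: i915_seqno_passed() (defined
 * in i915_drv.h) is essentially (int32_t)(seq1 - seq2) >= 0, so it copes
 * with the 32-bit sequence number wrapping.  Illustrative values:
 *
 *	i915_seqno_passed(5, 3)          -> true  (5 is newer)
 *	i915_seqno_passed(3, 5)          -> false
 *	i915_seqno_passed(2, 0xfffffffe) -> true  (wrapped, 2 is newer)
 *
 * A request is retired once the ring's current seqno has passed its own.
 */
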
1913void
1914i915_gem_retire_requests(struct drm_device *dev)
1915{
1916        drm_i915_private_t *dev_priv = dev->dev_private;
1917        int i;
1918
1919        if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1920                struct drm_i915_gem_object *obj, *next;
1921
1922                /* We must be careful that during unbind() we do not
1923                 * accidentally infinitely recurse into retire requests.
1924                 * Currently:
1925                 *   retire -> free -> unbind -> wait -> retire_ring
1926                 */
1927                list_for_each_entry_safe(obj, next,
1928                                         &dev_priv->mm.deferred_free_list,
1929                                         mm_list)
1930                        i915_gem_free_object_tail(obj);
1931        }
1932
1933        for (i = 0; i < I915_NUM_RINGS; i++)
1934                i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
1935}
1936
1937static void
1938i915_gem_retire_work_handler(struct work_struct *work)
1939{
1940        drm_i915_private_t *dev_priv;
1941        struct drm_device *dev;
1942        bool idle;
1943        int i;
1944
1945        dev_priv = container_of(work, drm_i915_private_t,
1946                                mm.retire_work.work);
1947        dev = dev_priv->dev;
1948
1949        /* Come back later if the device is busy... */
1950        if (!mutex_trylock(&dev->struct_mutex)) {
1951                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1952                return;
1953        }
1954
1955        i915_gem_retire_requests(dev);
1956
1957        /* Send a periodic flush down the ring so we don't hold onto GEM
1958         * objects indefinitely.
1959         */
1960        idle = true;
1961        for (i = 0; i < I915_NUM_RINGS; i++) {
1962                struct intel_ring_buffer *ring = &dev_priv->ring[i];
1963
1964                if (!list_empty(&ring->gpu_write_list)) {
1965                        struct drm_i915_gem_request *request;
1966                        int ret;
1967
1968                        ret = i915_gem_flush_ring(dev, ring, 0,
1969                                                  I915_GEM_GPU_DOMAINS);
1970                        request = kzalloc(sizeof(*request), GFP_KERNEL);
1971                        if (ret || request == NULL ||
1972                            i915_add_request(dev, NULL, request, ring))
1973                                kfree(request);
1974                }
1975
1976                idle &= list_empty(&ring->request_list);
1977        }
1978
1979        if (!dev_priv->mm.suspended && !idle)
1980                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1981
1982        mutex_unlock(&dev->struct_mutex);
1983}
1984
1985int
1986i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1987                     bool interruptible, struct intel_ring_buffer *ring)
1988{
1989        drm_i915_private_t *dev_priv = dev->dev_private;
1990        u32 ier;
1991        int ret = 0;
1992
1993        BUG_ON(seqno == 0);
1994
1995        if (atomic_read(&dev_priv->mm.wedged))
1996                return -EAGAIN;
1997
1998        if (seqno == ring->outstanding_lazy_request) {
1999                struct drm_i915_gem_request *request;
2000
2001                request = kzalloc(sizeof(*request), GFP_KERNEL);
2002                if (request == NULL)
2003                        return -ENOMEM;
2004
2005                ret = i915_add_request(dev, NULL, request, ring);
2006                if (ret) {
2007                        kfree(request);
2008                        return ret;
2009                }
2010
2011                seqno = request->seqno;
2012        }
2013
2014        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2015                if (HAS_PCH_SPLIT(dev))
2016                        ier = I915_READ(DEIER) | I915_READ(GTIER);
2017                else
2018                        ier = I915_READ(IER);
2019                if (!ier) {
2020                        DRM_ERROR("something (likely vbetool) disabled "
2021                                  "interrupts, re-enabling\n");
2022                        i915_driver_irq_preinstall(dev);
2023                        i915_driver_irq_postinstall(dev);
2024                }
2025
2026                trace_i915_gem_request_wait_begin(dev, seqno);
2027
2028                ring->waiting_seqno = seqno;
2029                if (ring->irq_get(ring)) {
2030                        if (interruptible)
2031                                ret = wait_event_interruptible(ring->irq_queue,
2032                                                               i915_seqno_passed(ring->get_seqno(ring), seqno)
2033                                                               || atomic_read(&dev_priv->mm.wedged));
2034                        else
2035                                wait_event(ring->irq_queue,
2036                                           i915_seqno_passed(ring->get_seqno(ring), seqno)
2037                                           || atomic_read(&dev_priv->mm.wedged));
2038
2039                        ring->irq_put(ring);
2040                } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2041                                                      seqno) ||
2042                                    atomic_read(&dev_priv->mm.wedged), 3000))
2043                        ret = -EBUSY;
2044                ring->waiting_seqno = 0;
2045
2046                trace_i915_gem_request_wait_end(dev, seqno);
2047        }
2048        if (atomic_read(&dev_priv->mm.wedged))
2049                ret = -EAGAIN;
2050
2051        if (ret && ret != -ERESTARTSYS)
2052                DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2053                          __func__, ret, seqno, ring->get_seqno(ring),
2054                          dev_priv->next_seqno);
2055
2056        /* Directly dispatch request retiring.  While we have the work queue
2057         * to handle this, the waiter on a request often wants an associated
2058         * buffer to have made it to the inactive list, and we would need
2059         * a separate wait queue to handle that.
2060         */
2061        if (ret == 0)
2062                i915_gem_retire_requests_ring(dev, ring);
2063
2064        return ret;
2065}
2066
2067/**
2068 * Waits for a sequence number to be signaled, and cleans up the
2069 * request and object lists appropriately for that event.
2070 */
2071static int
2072i915_wait_request(struct drm_device *dev, uint32_t seqno,
2073                  struct intel_ring_buffer *ring)
2074{
2075        return i915_do_wait_request(dev, seqno, 1, ring);
2076}
2077
2078/**
2079 * Ensures that all rendering to the object has completed and the object is
2080 * safe to unbind from the GTT or access from the CPU.
2081 */
2082int
2083i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2084                               bool interruptible)
2085{
2086        struct drm_device *dev = obj->base.dev;
2087        int ret;
2088
2089        /* This function only exists to support waiting for existing rendering,
2090         * not for emitting required flushes.
2091         */
2092        BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2093
2094        /* If there is rendering queued on the buffer being evicted, wait for
2095         * it.
2096         */
2097        if (obj->active) {
2098                ret = i915_do_wait_request(dev,
2099                                           obj->last_rendering_seqno,
2100                                           interruptible,
2101                                           obj->ring);
2102                if (ret)
2103                        return ret;
2104        }
2105
2106        return 0;
2107}
2108
2109/**
2110 * Unbinds an object from the GTT aperture.
2111 */
2112int
2113i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2114{
2115        int ret = 0;
2116
2117        if (obj->gtt_space == NULL)
2118                return 0;
2119
2120        if (obj->pin_count != 0) {
2121                DRM_ERROR("Attempting to unbind pinned buffer\n");
2122                return -EINVAL;
2123        }
2124
2125        /* blow away mappings if mapped through GTT */
2126        i915_gem_release_mmap(obj);
2127
2128        /* Move the object to the CPU domain to ensure that
2129         * any possible CPU writes while it's not in the GTT
2130         * are flushed when we go to remap it. This will
2131         * also ensure that all pending GPU writes are finished
2132         * before we unbind.
2133         */
2134        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2135        if (ret == -ERESTARTSYS)
2136                return ret;
2137        /* Continue on if we fail due to EIO, the GPU is hung so we
2138         * should be safe and we need to cleanup or else we might
2139         * cause memory corruption through use-after-free.
2140         */
2141        if (ret) {
2142                i915_gem_clflush_object(obj);
2143                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2144        }
2145
2146        /* release the fence reg _after_ flushing */
2147        ret = i915_gem_object_put_fence(obj);
2148        if (ret == -ERESTARTSYS)
2149                return ret;
2150
2151        i915_gem_gtt_unbind_object(obj);
2152        i915_gem_object_put_pages_gtt(obj);
2153
2154        list_del_init(&obj->gtt_list);
2155        list_del_init(&obj->mm_list);
2156        /* Avoid an unnecessary call to unbind on rebind. */
2157        obj->map_and_fenceable = true;
2158
2159        drm_mm_put_block(obj->gtt_space);
2160        obj->gtt_space = NULL;
2161        obj->gtt_offset = 0;
2162
2163        if (i915_gem_object_is_purgeable(obj))
2164                i915_gem_object_truncate(obj);
2165
2166        trace_i915_gem_object_unbind(obj);
2167
2168        return ret;
2169}
2170
2171int
2172i915_gem_flush_ring(struct drm_device *dev,
2173                    struct intel_ring_buffer *ring,
2174                    uint32_t invalidate_domains,
2175                    uint32_t flush_domains)
2176{
2177        int ret;
2178
2179        ret = ring->flush(ring, invalidate_domains, flush_domains);
2180        if (ret)
2181                return ret;
2182
2183        i915_gem_process_flushing_list(dev, flush_domains, ring);
2184        return 0;
2185}
2186
2187static int i915_ring_idle(struct drm_device *dev,
2188                          struct intel_ring_buffer *ring)
2189{
2190        int ret;
2191
2192        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2193                return 0;
2194
2195        if (!list_empty(&ring->gpu_write_list)) {
2196                ret = i915_gem_flush_ring(dev, ring,
2197                                    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2198                if (ret)
2199                        return ret;
2200        }
2201
2202        return i915_wait_request(dev,
2203                                 i915_gem_next_request_seqno(dev, ring),
2204                                 ring);
2205}
2206
2207int
2208i915_gpu_idle(struct drm_device *dev)
2209{
2210        drm_i915_private_t *dev_priv = dev->dev_private;
2211        bool lists_empty;
2212        int ret, i;
2213
2214        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2215                       list_empty(&dev_priv->mm.active_list));
2216        if (lists_empty)
2217                return 0;
2218
2219        /* Flush everything onto the inactive list. */
2220        for (i = 0; i < I915_NUM_RINGS; i++) {
2221                ret = i915_ring_idle(dev, &dev_priv->ring[i]);
2222                if (ret)
2223                        return ret;
2224        }
2225
2226        return 0;
2227}
2228
2229static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2230                                       struct intel_ring_buffer *pipelined)
2231{
2232        struct drm_device *dev = obj->base.dev;
2233        drm_i915_private_t *dev_priv = dev->dev_private;
2234        u32 size = obj->gtt_space->size;
2235        int regnum = obj->fence_reg;
2236        uint64_t val;
2237
2238        val = (uint64_t)((obj->gtt_offset + size - 4096) &
2239                         0xfffff000) << 32;
2240        val |= obj->gtt_offset & 0xfffff000;
2241        val |= (uint64_t)((obj->stride / 128) - 1) <<
2242                SANDYBRIDGE_FENCE_PITCH_SHIFT;
2243
2244        if (obj->tiling_mode == I915_TILING_Y)
2245                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2246        val |= I965_FENCE_REG_VALID;
2247
2248        if (pipelined) {
2249                int ret = intel_ring_begin(pipelined, 6);
2250                if (ret)
2251                        return ret;
2252
2253                intel_ring_emit(pipelined, MI_NOOP);
2254                intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2255                intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2256                intel_ring_emit(pipelined, (u32)val);
2257                intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2258                intel_ring_emit(pipelined, (u32)(val >> 32));
2259                intel_ring_advance(pipelined);
2260        } else
2261                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2262
2263        return 0;
2264}
2265
2266static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2267                                struct intel_ring_buffer *pipelined)
2268{
2269        struct drm_device *dev = obj->base.dev;
2270        drm_i915_private_t *dev_priv = dev->dev_private;
2271        u32 size = obj->gtt_space->size;
2272        int regnum = obj->fence_reg;
2273        uint64_t val;
2274
2275        val = (uint64_t)((obj->gtt_offset + size - 4096) &
2276                    0xfffff000) << 32;
2277        val |= obj->gtt_offset & 0xfffff000;
2278        val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2279        if (obj->tiling_mode == I915_TILING_Y)
2280                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2281        val |= I965_FENCE_REG_VALID;
2282
2283        if (pipelined) {
2284                int ret = intel_ring_begin(pipelined, 6);
2285                if (ret)
2286                        return ret;
2287
2288                intel_ring_emit(pipelined, MI_NOOP);
2289                intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2290                intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2291                intel_ring_emit(pipelined, (u32)val);
2292                intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2293                intel_ring_emit(pipelined, (u32)(val >> 32));
2294                intel_ring_advance(pipelined);
2295        } else
2296                I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2297
2298        return 0;
2299}
2300
2301static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2302                                struct intel_ring_buffer *pipelined)
2303{
2304        struct drm_device *dev = obj->base.dev;
2305        drm_i915_private_t *dev_priv = dev->dev_private;
2306        u32 size = obj->gtt_space->size;
2307        u32 fence_reg, val, pitch_val;
2308        int tile_width;
2309
2310        if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2311                 (size & -size) != size ||
2312                 (obj->gtt_offset & (size - 1)),
2313                 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2314                 obj->gtt_offset, obj->map_and_fenceable, size))
2315                return -EINVAL;
2316
2317        if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2318                tile_width = 128;
2319        else
2320                tile_width = 512;
2321
2322        /* Note: pitch better be a power of two tile widths */
2323        pitch_val = obj->stride / tile_width;
2324        pitch_val = ffs(pitch_val) - 1;
2325
2326        val = obj->gtt_offset;
2327        if (obj->tiling_mode == I915_TILING_Y)
2328                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2329        val |= I915_FENCE_SIZE_BITS(size);
2330        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2331        val |= I830_FENCE_REG_VALID;
2332
2333        fence_reg = obj->fence_reg;
2334        if (fence_reg < 8)
2335                fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2336        else
2337                fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2338
2339        if (pipelined) {
2340                int ret = intel_ring_begin(pipelined, 4);
2341                if (ret)
2342                        return ret;
2343
2344                intel_ring_emit(pipelined, MI_NOOP);
2345                intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2346                intel_ring_emit(pipelined, fence_reg);
2347                intel_ring_emit(pipelined, val);
2348                intel_ring_advance(pipelined);
2349        } else
2350                I915_WRITE(fence_reg, val);
2351
2352        return 0;
2353}
2354
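/*
 * Worked example for the pre-965 fence layout written above (values are
 * hypothetical): an X-tiled object uses tile_width = 512, so a 2048-byte
 * stride gives
 *
 *	pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2
 *
 * and the register value packs the GTT offset, I915_FENCE_SIZE_BITS(size),
 * pitch_val << I830_FENCE_PITCH_SHIFT and I830_FENCE_REG_VALID, exactly
 * as computed in i915_write_fence_reg().
 */
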
2355static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2356                                struct intel_ring_buffer *pipelined)
2357{
2358        struct drm_device *dev = obj->base.dev;
2359        drm_i915_private_t *dev_priv = dev->dev_private;
2360        u32 size = obj->gtt_space->size;
2361        int regnum = obj->fence_reg;
2362        uint32_t val;
2363        uint32_t pitch_val;
2364
2365        if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2366                 (size & -size) != size ||
2367                 (obj->gtt_offset & (size - 1)),
2368                 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2369                 obj->gtt_offset, size))
2370                return -EINVAL;
2371
2372        pitch_val = obj->stride / 128;
2373        pitch_val = ffs(pitch_val) - 1;
2374
2375        val = obj->gtt_offset;
2376        if (obj->tiling_mode == I915_TILING_Y)
2377                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2378        val |= I830_FENCE_SIZE_BITS(size);
2379        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2380        val |= I830_FENCE_REG_VALID;
2381
2382        if (pipelined) {
2383                int ret = intel_ring_begin(pipelined, 4);
2384                if (ret)
2385                        return ret;
2386
2387                intel_ring_emit(pipelined, MI_NOOP);
2388                intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2389                intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2390                intel_ring_emit(pipelined, val);
2391                intel_ring_advance(pipelined);
2392        } else
2393                I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2394
2395        return 0;
2396}
2397
2398static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2399{
2400        return i915_seqno_passed(ring->get_seqno(ring), seqno);
2401}
2402
2403static int
2404i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2405                            struct intel_ring_buffer *pipelined,
2406                            bool interruptible)
2407{
2408        int ret;
2409
2410        if (obj->fenced_gpu_access) {
2411                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2412                        ret = i915_gem_flush_ring(obj->base.dev,
2413                                                  obj->last_fenced_ring,
2414                                                  0, obj->base.write_domain);
2415                        if (ret)
2416                                return ret;
2417                }
2418
2419                obj->fenced_gpu_access = false;
2420        }
2421
2422        if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2423                if (!ring_passed_seqno(obj->last_fenced_ring,
2424                                       obj->last_fenced_seqno)) {
2425                        ret = i915_do_wait_request(obj->base.dev,
2426                                                   obj->last_fenced_seqno,
2427                                                   interruptible,
2428                                                   obj->last_fenced_ring);
2429                        if (ret)
2430                                return ret;
2431                }
2432
2433                obj->last_fenced_seqno = 0;
2434                obj->last_fenced_ring = NULL;
2435        }
2436
2437        /* Ensure that all CPU reads are completed before installing a fence
2438         * and all writes before removing the fence.
2439         */
2440        if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2441                mb();
2442
2443        return 0;
2444}
2445
2446int
2447i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2448{
2449        int ret;
2450
2451        if (obj->tiling_mode)
2452                i915_gem_release_mmap(obj);
2453
2454        ret = i915_gem_object_flush_fence(obj, NULL, true);
2455        if (ret)
2456                return ret;
2457
2458        if (obj->fence_reg != I915_FENCE_REG_NONE) {
2459                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2460                i915_gem_clear_fence_reg(obj->base.dev,
2461                                         &dev_priv->fence_regs[obj->fence_reg]);
2462
2463                obj->fence_reg = I915_FENCE_REG_NONE;
2464        }
2465
2466        return 0;
2467}
2468
2469static struct drm_i915_fence_reg *
2470i915_find_fence_reg(struct drm_device *dev,
2471                    struct intel_ring_buffer *pipelined)
2472{
2473        struct drm_i915_private *dev_priv = dev->dev_private;
2474        struct drm_i915_fence_reg *reg, *first, *avail;
2475        int i;
2476
2477        /* First try to find a free reg */
2478        avail = NULL;
2479        for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2480                reg = &dev_priv->fence_regs[i];
2481                if (!reg->obj)
2482                        return reg;
2483
2484                if (!reg->obj->pin_count)
2485                        avail = reg;
2486        }
2487
2488        if (avail == NULL)
2489                return NULL;
2490
2491        /* None available, try to steal one or wait for a user to finish */
2492        avail = first = NULL;
2493        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2494                if (reg->obj->pin_count)
2495                        continue;
2496
2497                if (first == NULL)
2498                        first = reg;
2499
2500                if (!pipelined ||
2501                    !reg->obj->last_fenced_ring ||
2502                    reg->obj->last_fenced_ring == pipelined) {
2503                        avail = reg;
2504                        break;
2505                }
2506        }
2507
2508        if (avail == NULL)
2509                avail = first;
2510
2511        return avail;
2512}
2513
2514/**
2515 * i915_gem_object_get_fence - set up a fence reg for an object
2516 * @obj: object to map through a fence reg
2517 * @pipelined: ring on which to queue the change, or NULL for CPU access
2518 * @interruptible: whether the wait for the register to retire may be interrupted
2519 *
2520 * When mapping objects through the GTT, userspace wants to be able to write
2521 * to them without having to worry about swizzling if the object is tiled.
2522 *
2523 * This function walks the fence regs looking for a free one for @obj,
2524 * stealing one if it can't find any.
2525 *
2526 * It then sets up the reg based on the object's properties: address, pitch
2527 * and tiling format.
2528 */
2529int
2530i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2531                          struct intel_ring_buffer *pipelined,
2532                          bool interruptible)
2533{
2534        struct drm_device *dev = obj->base.dev;
2535        struct drm_i915_private *dev_priv = dev->dev_private;
2536        struct drm_i915_fence_reg *reg;
2537        int ret;
2538
2539        /* XXX disable pipelining. There are bugs. Shocking. */
2540        pipelined = NULL;
2541
2542        /* Just update our place in the LRU if our fence is getting reused. */
2543        if (obj->fence_reg != I915_FENCE_REG_NONE) {
2544                reg = &dev_priv->fence_regs[obj->fence_reg];
2545                list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2546
2547                if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2548                        pipelined = NULL;
2549
2550                if (!pipelined) {
2551                        if (reg->setup_seqno) {
2552                                if (!ring_passed_seqno(obj->last_fenced_ring,
2553                                                       reg->setup_seqno)) {
2554                                        ret = i915_do_wait_request(obj->base.dev,
2555                                                                   reg->setup_seqno,
2556                                                                   interruptible,
2557                                                                   obj->last_fenced_ring);
2558                                        if (ret)
2559                                                return ret;
2560                                }
2561
2562                                reg->setup_seqno = 0;
2563                        }
2564                } else if (obj->last_fenced_ring &&
2565                           obj->last_fenced_ring != pipelined) {
2566                        ret = i915_gem_object_flush_fence(obj,
2567                                                          pipelined,
2568                                                          interruptible);
2569                        if (ret)
2570                                return ret;
2571                } else if (obj->tiling_changed) {
2572                        if (obj->fenced_gpu_access) {
2573                                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2574                                        ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
2575                                                                  0, obj->base.write_domain);
2576                                        if (ret)
2577                                                return ret;
2578                                }
2579
2580                                obj->fenced_gpu_access = false;
2581                        }
2582                }
2583
2584                if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2585                        pipelined = NULL;
2586                BUG_ON(!pipelined && reg->setup_seqno);
2587
2588                if (obj->tiling_changed) {
2589                        if (pipelined) {
2590                                reg->setup_seqno =
2591                                        i915_gem_next_request_seqno(dev, pipelined);
2592                                obj->last_fenced_seqno = reg->setup_seqno;
2593                                obj->last_fenced_ring = pipelined;
2594                        }
2595                        goto update;
2596                }
2597
2598                return 0;
2599        }
2600
2601        reg = i915_find_fence_reg(dev, pipelined);
2602        if (reg == NULL)
2603                return -ENOSPC;
2604
2605        ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
2606        if (ret)
2607                return ret;
2608
2609        if (reg->obj) {
2610                struct drm_i915_gem_object *old = reg->obj;
2611
2612                drm_gem_object_reference(&old->base);
2613
2614                if (old->tiling_mode)
2615                        i915_gem_release_mmap(old);
2616
2617                ret = i915_gem_object_flush_fence(old,
2618                                                  pipelined,
2619                                                  interruptible);
2620                if (ret) {
2621                        drm_gem_object_unreference(&old->base);
2622                        return ret;
2623                }
2624
2625                if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2626                        pipelined = NULL;
2627
2628                old->fence_reg = I915_FENCE_REG_NONE;
2629                old->last_fenced_ring = pipelined;
2630                old->last_fenced_seqno =
2631                        pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2632
2633                drm_gem_object_unreference(&old->base);
2634        } else if (obj->last_fenced_seqno == 0)
2635                pipelined = NULL;
2636
2637        reg->obj = obj;
2638        list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2639        obj->fence_reg = reg - dev_priv->fence_regs;
2640        obj->last_fenced_ring = pipelined;
2641
2642        reg->setup_seqno =
2643                pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2644        obj->last_fenced_seqno = reg->setup_seqno;
2645
2646update:
2647        obj->tiling_changed = false;
2648        switch (INTEL_INFO(dev)->gen) {
2649        case 6:
2650                ret = sandybridge_write_fence_reg(obj, pipelined);
2651                break;
2652        case 5:
2653        case 4:
2654                ret = i965_write_fence_reg(obj, pipelined);
2655                break;
2656        case 3:
2657                ret = i915_write_fence_reg(obj, pipelined);
2658                break;
2659        case 2:
2660                ret = i830_write_fence_reg(obj, pipelined);
2661                break;
2662        }
2663
2664        return ret;
2665}
2666
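/*
 * Typical usage (a sketch; pinning and error paths abbreviated): callers
 * that need tiled CPU access through the aperture, such as the GTT fault
 * path, bind the object first and then request a fence for the CPU:
 *
 *	ret = i915_gem_object_get_fence(obj, NULL, true);
 *	if (ret)
 *		goto err;
 *
 * Passing a NULL pipelined ring asks for the fence to be ready for
 * immediate CPU use rather than queued on a ring, per the kerneldoc above.
 */
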
2667/**
2668 * i915_gem_clear_fence_reg - clear out fence register info
2669 * @obj: object to clear
2670 *
2671 * Zeroes out the fence register itself and clears out the associated
2672 * data structures in dev_priv and obj.
2673 */
2674static void
2675i915_gem_clear_fence_reg(struct drm_device *dev,
2676                         struct drm_i915_fence_reg *reg)
2677{
2678        drm_i915_private_t *dev_priv = dev->dev_private;
2679        uint32_t fence_reg = reg - dev_priv->fence_regs;
2680
2681        switch (INTEL_INFO(dev)->gen) {
2682        case 6:
2683                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2684                break;
2685        case 5:
2686        case 4:
2687                I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2688                break;
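        /*
         * Note: the 'else' below deliberately falls through into 'case 2':
         * fence registers 0-7 live in the FENCE_REG_830_0 block on both
         * gen2 and gen3, and only gen3 registers 8-15 sit in the separate
         * FENCE_REG_945_8 block.
         */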
2689        case 3:
2690                if (fence_reg >= 8)
2691                        fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2692                else
2693        case 2:
2694                        fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2695
2696                I915_WRITE(fence_reg, 0);
2697                break;
2698        }
2699
2700        list_del_init(&reg->lru_list);
2701        reg->obj = NULL;
2702        reg->setup_seqno = 0;
2703}
2704
2705/**
2706 * Finds free space in the GTT aperture and binds the object there.
2707 */
2708static int
2709i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2710                            unsigned alignment,
2711                            bool map_and_fenceable)
2712{
2713        struct drm_device *dev = obj->base.dev;
2714        drm_i915_private_t *dev_priv = dev->dev_private;
2715        struct drm_mm_node *free_space;
2716        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2717        u32 size, fence_size, fence_alignment, unfenced_alignment;
2718        bool mappable, fenceable;
2719        int ret;
2720
2721        if (obj->madv != I915_MADV_WILLNEED) {
2722                DRM_ERROR("Attempting to bind a purgeable object\n");
2723                return -EINVAL;
2724        }
2725
2726        fence_size = i915_gem_get_gtt_size(obj);
2727        fence_alignment = i915_gem_get_gtt_alignment(obj);
2728        unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
2729
2730        if (alignment == 0)
2731                alignment = map_and_fenceable ? fence_alignment :
2732                                                unfenced_alignment;
2733        if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2734                DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2735                return -EINVAL;
2736        }
2737
2738        size = map_and_fenceable ? fence_size : obj->base.size;
2739
2740        /* If the object is bigger than the entire aperture, reject it early
2741         * before evicting everything in a vain attempt to find space.
2742         */
2743        if (obj->base.size >
2744            (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2745                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2746                return -E2BIG;
2747        }
2748
2749 search_free:
2750        if (map_and_fenceable)
2751                free_space =
2752                        drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2753                                                    size, alignment, 0,
2754                                                    dev_priv->mm.gtt_mappable_end,
2755                                                    0);
2756        else
2757                free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2758                                                size, alignment, 0);
2759
2760        if (free_space != NULL) {
2761                if (map_and_fenceable)
2762                        obj->gtt_space =
2763                                drm_mm_get_block_range_generic(free_space,
2764                                                               size, alignment, 0,
2765                                                               dev_priv->mm.gtt_mappable_end,
2766                                                               0);
2767                else
2768                        obj->gtt_space =
2769                                drm_mm_get_block(free_space, size, alignment);
2770        }
2771        if (obj->gtt_space == NULL) {
2772                /* If the gtt is empty and we're still having trouble
2773                 * fitting our object in, we're out of memory.
2774                 */
2775                ret = i915_gem_evict_something(dev, size, alignment,
2776                                               map_and_fenceable);
2777                if (ret)
2778                        return ret;
2779
2780                goto search_free;
2781        }
2782
2783        ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2784        if (ret) {
2785                drm_mm_put_block(obj->gtt_space);
2786                obj->gtt_space = NULL;
2787
2788                if (ret == -ENOMEM) {
2789                        /* first try to reclaim some memory by clearing the GTT */
2790                        ret = i915_gem_evict_everything(dev, false);
2791                        if (ret) {
2792                                /* now try to shrink everyone else */
2793                                if (gfpmask) {
2794                                        gfpmask = 0;
2795                                        goto search_free;
2796                                }
2797
2798                                return -ENOMEM;
2799                        }
2800
2801                        goto search_free;
2802                }
2803
2804                return ret;
2805        }
2806
2807        ret = i915_gem_gtt_bind_object(obj);
2808        if (ret) {
2809                i915_gem_object_put_pages_gtt(obj);
2810                drm_mm_put_block(obj->gtt_space);
2811                obj->gtt_space = NULL;
2812
2813                if (i915_gem_evict_everything(dev, false))
2814                        return ret;
2815
2816                goto search_free;
2817        }
2818
2819        list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2820        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2821
2822        /* Assert that the object is not currently in any GPU domain. As it
2823         * wasn't in the GTT, there shouldn't be any way it could have been in
2824         * a GPU cache.
2825         */
2826        BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2827        BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2828
2829        obj->gtt_offset = obj->gtt_space->start;
2830
2831        fenceable =
2832                obj->gtt_space->size == fence_size &&
2833                (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2834
2835        mappable =
2836                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2837
2838        obj->map_and_fenceable = mappable && fenceable;
2839
2840        trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
2841        return 0;
2842}
2843
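/*
 * The allocation strategy above, in brief (a descriptive summary of the
 * code, not new behaviour):
 *
 *	1. search for free GTT space of the required size and alignment,
 *	   restricted to the mappable range if map_and_fenceable is set;
 *	2. if none is found, evict something and retry (search_free);
 *	3. if the backing pages cannot be allocated, evict everything from
 *	   the GTT and retry; should eviction itself fail, retry once more
 *	   with gfpmask = 0 so the shmem allocation may block and reclaim;
 *	4. if inserting the PTEs fails, evict everything and retry again.
 */
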
2844void
2845i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2846{
2847        /* If we don't have a page list set up, then we're not pinned
2848         * to GPU, and we can ignore the cache flush because it'll happen
2849         * again at bind time.
2850         */
2851        if (obj->pages == NULL)
2852                return;
2853
2854        trace_i915_gem_object_clflush(obj);
2855
2856        drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2857}
2858
2859/** Flushes any GPU write domain for the object if it's dirty. */
2860static int
2861i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2862{
2863        struct drm_device *dev = obj->base.dev;
2864
2865        if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2866                return 0;
2867
2868        /* Queue the GPU write cache flushing we need. */
2869        return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
2870}
2871
2872/** Flushes the GTT write domain for the object if it's dirty. */
2873static void
2874i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2875{
2876        uint32_t old_write_domain;
2877
2878        if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2879                return;
2880
2881        /* No actual flushing is required for the GTT write domain.  Writes
2882         * to it immediately go to main memory as far as we know, so there's
2883         * no chipset flush.  It also doesn't land in render cache.
2884         *
2885         * However, we do have to enforce the order so that all writes through
2886         * the GTT land before any writes to the device, such as updates to
2887         * the GATT itself.
2888         */
2889        wmb();
2890
2891        i915_gem_release_mmap(obj);
2892
2893        old_write_domain = obj->base.write_domain;
2894        obj->base.write_domain = 0;
2895
2896        trace_i915_gem_object_change_domain(obj,
2897                                            obj->base.read_domains,
2898                                            old_write_domain);
2899}
2900
2901/** Flushes the CPU write domain for the object if it's dirty. */
2902static void
2903i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2904{
2905        uint32_t old_write_domain;
2906
2907        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2908                return;
2909
2910        i915_gem_clflush_object(obj);
2911        intel_gtt_chipset_flush();
2912        old_write_domain = obj->base.write_domain;
2913        obj->base.write_domain = 0;
2914
2915        trace_i915_gem_object_change_domain(obj,
2916                                            obj->base.read_domains,
2917                                            old_write_domain);
2918}
2919
2920/**
2921 * Moves a single object to the GTT read, and possibly write domain.
2922 *
2923 * This function returns when the move is complete, including waiting on
2924 * flushes to occur.
2925 */
2926int
2927i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2928{
2929        uint32_t old_write_domain, old_read_domains;
2930        int ret;
2931
2932        /* Not valid to be called on unbound objects. */
2933        if (obj->gtt_space == NULL)
2934                return -EINVAL;
2935
2936        ret = i915_gem_object_flush_gpu_write_domain(obj);
2937        if (ret)
2938                return ret;
2939
2940        if (obj->pending_gpu_write || write) {
2941                ret = i915_gem_object_wait_rendering(obj, true);
2942                if (ret)
2943                        return ret;
2944        }
2945
2946        i915_gem_object_flush_cpu_write_domain(obj);
2947
2948        old_write_domain = obj->base.write_domain;
2949        old_read_domains = obj->base.read_domains;
2950
2951        /* It should now be out of any other write domains, and we can update
2952         * the domain values for our changes.
2953         */
2954        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2955        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2956        if (write) {
2957                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2958                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2959                obj->dirty = 1;
2960        }
2961
2962        trace_i915_gem_object_change_domain(obj,
2963                                            old_read_domains,
2964                                            old_write_domain);
2965
2966        return 0;
2967}
2968
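/*
 * Typical usage (a sketch; binding and error handling abbreviated):
 * callers that want coherent CPU writes through the aperture, such as the
 * GTT pwrite fast path, move the bound object to the GTT domain first:
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *
 * With write = true the object is marked dirty and every other read
 * domain is invalidated, as implemented above.
 */
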
2969/*
2970 * Prepare buffer for display plane. Use an uninterruptible wait for any
2971 * required flush, since during modesetting we are not supposed to be interrupted.
2972 */
2973int
2974i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
2975                                     struct intel_ring_buffer *pipelined)
2976{
2977        uint32_t old_read_domains;
2978        int ret;
2979
2980        /* Not valid to be called on unbound objects. */
2981        if (obj->gtt_space == NULL)
2982                return -EINVAL;
2983
2984        ret = i915_gem_object_flush_gpu_write_domain(obj);
2985        if (ret)
2986                return ret;
2987
2988
2989        /* Currently, we are always called from a non-interruptible context. */
2990        if (pipelined != obj->ring) {
2991                ret = i915_gem_object_wait_rendering(obj, false);
2992                if (ret)
2993                        return ret;
2994        }
2995
2996        i915_gem_object_flush_cpu_write_domain(obj);
2997
2998        old_read_domains = obj->base.read_domains;
2999        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3000
3001        trace_i915_gem_object_change_domain(obj,
3002                                            old_read_domains,
3003                                            obj->base.write_domain);
3004
3005        return 0;
3006}
3007
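/**
 * Flushes any pending GPU write domain for the object and then waits for
 * outstanding rendering to complete.  Returns immediately if the object is
 * not active.
 */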
3008int
3009i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
3010                          bool interruptible)
3011{
3012        int ret;
3013
3014        if (!obj->active)
3015                return 0;
3016
3017        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3018                ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
3019                                          0, obj->base.write_domain);
3020                if (ret)
3021                        return ret;
3022        }
3023
3024        return i915_gem_object_wait_rendering(obj, interruptible);
3025}
3026
3027/**
3028 * Moves a single object to the CPU read, and possibly write domain.
3029 *
3030 * This function returns when the move is complete, including waiting on
3031 * flushes to occur.
3032 */
3033static int
3034i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3035{
3036        uint32_t old_write_domain, old_read_domains;
3037        int ret;
3038
3039        ret = i915_gem_object_flush_gpu_write_domain(obj);
3040        if (ret)
3041                return ret;
3042
3043        ret = i915_gem_object_wait_rendering(obj, true);
3044        if (ret)
3045                return ret;
3046
3047        i915_gem_object_flush_gtt_write_domain(obj);
3048
3049        /* If we have a partially-valid cache of the object in the CPU,
3050         * finish invalidating it and free the per-page flags.
3051         */
3052        i915_gem_object_set_to_full_cpu_read_domain(obj);
3053
3054        old_write_domain = obj->base.write_domain;
3055        old_read_domains = obj->base.read_domains;
3056
3057        /* Flush the CPU cache if it's still invalid. */
3058        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3059                i915_gem_clflush_object(obj);
3060
3061                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3062        }
3063
3064        /* It should now be out of any other write domains, and we can update
3065         * the domain values for our changes.
3066         */
3067        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3068
3069        /* If we're writing through the CPU, then the GPU read domains will
3070         * need to be invalidated at next use.
3071         */
3072        if (write) {
3073                obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3074                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3075        }
3076
3077        trace_i915_gem_object_change_domain(obj,
3078                                            old_read_domains,
3079                                            old_write_domain);
3080
3081        return 0;
3082}
3083
3084/**
3085 * Moves the object from a partially CPU read to a full one.
3086 *
3087 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3088 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3089 */
3090static void
3091i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3092{
3093        if (!obj->page_cpu_valid)
3094                return;
3095
3096        /* If we're partially in the CPU read domain, finish moving it in.
3097         */
3098        if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3099                int i;
3100
3101                for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3102                        if (obj->page_cpu_valid[i])
3103                                continue;
3104                        drm_clflush_pages(obj->pages + i, 1);
3105                }
3106        }
3107
3108        /* Free the page_cpu_valid mappings which are now stale, whether
3109         * or not we've got I915_GEM_DOMAIN_CPU.
3110         */
3111        kfree(obj->page_cpu_valid);
3112        obj->page_cpu_valid = NULL;
3113}
3114
3115/**
3116 * Set the CPU read domain on a range of the object.
3117 *
3118 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3119 * not entirely valid.  The page_cpu_valid member of the object records which
3120 * pages have been flushed, and is respected by
3121 * i915_gem_object_set_to_cpu_domain() if it is later called to obtain a valid
3122 * mapping of the whole object.
3123 *
3124 * This function returns when the move is complete, including waiting on
3125 * flushes to occur.
3126 */
3127static int
3128i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3129                                          uint64_t offset, uint64_t size)
3130{
3131        uint32_t old_read_domains;
3132        int i, ret;
3133
3134        if (offset == 0 && size == obj->base.size)
3135                return i915_gem_object_set_to_cpu_domain(obj, 0);
3136
3137        ret = i915_gem_object_flush_gpu_write_domain(obj);
3138        if (ret)
3139                return ret;
3140
3141        ret = i915_gem_object_wait_rendering(obj, true);
3142        if (ret)
3143                return ret;
3144
3145        i915_gem_object_flush_gtt_write_domain(obj);
3146
3147        /* If we're already fully in the CPU read domain, we're done. */
3148        if (obj->page_cpu_valid == NULL &&
3149            (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3150                return 0;
3151
3152        /* Otherwise, create/clear the per-page CPU read domain flag if we're
3153         * newly adding I915_GEM_DOMAIN_CPU
3154         */
3155        if (obj->page_cpu_valid == NULL) {
3156                obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3157                                              GFP_KERNEL);
3158                if (obj->page_cpu_valid == NULL)
3159                        return -ENOMEM;
3160        } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3161                memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3162
3163        /* Flush the cache on any pages that are still invalid from the CPU's
3164         * perspective.
3165         */
3166        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3167             i++) {
3168                if (obj->page_cpu_valid[i])
3169                        continue;
3170
3171                drm_clflush_pages(obj->pages + i, 1);
3172
3173                obj->page_cpu_valid[i] = 1;
3174        }
3175
3176        /* It should now be out of any other write domains, and we can update
3177         * the domain values for our changes.
3178         */
3179        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3180
3181        old_read_domains = obj->base.read_domains;
3182        obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3183
3184        trace_i915_gem_object_change_domain(obj,
3185                                            old_read_domains,
3186                                            obj->base.write_domain);
3187
3188        return 0;
3189}
3190
3191/* Throttle our rendering by waiting until the ring has completed our requests
3192 * emitted over 20 msec ago.
3193 *
3194 * Note that if we were to use the current jiffies each time around the loop,
3195 * we wouldn't escape the function with any frames outstanding if the time to
3196 * render a frame was over 20ms.
3197 *
3198 * This should get us reasonable parallelism between CPU and GPU but also
3199 * relatively low latency when blocking on a particular request to finish.
3200 */
3201static int
3202i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3203{
3204        struct drm_i915_private *dev_priv = dev->dev_private;
3205        struct drm_i915_file_private *file_priv = file->driver_priv;
3206        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3207        struct drm_i915_gem_request *request;
3208        struct intel_ring_buffer *ring = NULL;
3209        u32 seqno = 0;
3210        int ret;
3211
3212        spin_lock(&file_priv->mm.lock);
3213        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3214                if (time_after_eq(request->emitted_jiffies, recent_enough))
3215                        break;
3216
3217                ring = request->ring;
3218                seqno = request->seqno;
3219        }
3220        spin_unlock(&file_priv->mm.lock);
3221
3222        if (seqno == 0)
3223                return 0;
3224
3225        ret = 0;
3226        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3227                /* And wait for the seqno passing without holding any locks and
3228                 * causing extra latency for others. This is safe as the irq
3229                 * generation is designed to be run atomically and so is
3230                 * lockless.
3231                 */
3232                if (ring->irq_get(ring)) {
3233                        ret = wait_event_interruptible(ring->irq_queue,
3234                                                       i915_seqno_passed(ring->get_seqno(ring), seqno)
3235                                                       || atomic_read(&dev_priv->mm.wedged));
3236                        ring->irq_put(ring);
3237
3238                        if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3239                                ret = -EIO;
3240                }
3241        }
3242
3243        if (ret == 0)
3244                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3245
3246        return ret;
3247}
3248
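/**
 * Pins an object into the GTT, binding it first if necessary.  If the object
 * is already bound at an incompatible alignment or mappability it is unbound
 * and rebound (with a warning if it was pinned at the time).  The first pin
 * moves an inactive object onto the pinned list.  Must be called with
 * struct_mutex held.
 *
 * Illustrative use only, not copied from any caller:
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret == 0) {
 *		... program the hardware with obj->gtt_offset ...
 *		i915_gem_object_unpin(obj);
 *	}
 */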
3249int
3250i915_gem_object_pin(struct drm_i915_gem_object *obj,
3251                    uint32_t alignment,
3252                    bool map_and_fenceable)
3253{
3254        struct drm_device *dev = obj->base.dev;
3255        struct drm_i915_private *dev_priv = dev->dev_private;
3256        int ret;
3257
3258        BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3259        WARN_ON(i915_verify_lists(dev));
3260
3261        if (obj->gtt_space != NULL) {
3262                if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3263                    (map_and_fenceable && !obj->map_and_fenceable)) {
3264                        WARN(obj->pin_count,
3265                             "bo is already pinned with incorrect alignment:"
3266                             " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3267                             " obj->map_and_fenceable=%d\n",
3268                             obj->gtt_offset, alignment,
3269                             map_and_fenceable,
3270                             obj->map_and_fenceable);
3271                        ret = i915_gem_object_unbind(obj);
3272                        if (ret)
3273                                return ret;
3274                }
3275        }
3276
3277        if (obj->gtt_space == NULL) {
3278                ret = i915_gem_object_bind_to_gtt(obj, alignment,
3279                                                  map_and_fenceable);
3280                if (ret)
3281                        return ret;
3282        }
3283
3284        if (obj->pin_count++ == 0) {
3285                if (!obj->active)
3286                        list_move_tail(&obj->mm_list,
3287                                       &dev_priv->mm.pinned_list);
3288        }
3289        obj->pin_mappable |= map_and_fenceable;
3290
3291        WARN_ON(i915_verify_lists(dev));
3292        return 0;
3293}
3294
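/**
 * Releases one pin reference.  When the last reference is dropped, an
 * inactive object is moved back onto the inactive list and its mappable
 * pinning flag is cleared.
 */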
3295void
3296i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3297{
3298        struct drm_device *dev = obj->base.dev;
3299        drm_i915_private_t *dev_priv = dev->dev_private;
3300
3301        WARN_ON(i915_verify_lists(dev));
3302        BUG_ON(obj->pin_count == 0);
3303        BUG_ON(obj->gtt_space == NULL);
3304
3305        if (--obj->pin_count == 0) {
3306                if (!obj->active)
3307                        list_move_tail(&obj->mm_list,
3308                                       &dev_priv->mm.inactive_list);
3309                obj->pin_mappable = false;
3310        }
3311        WARN_ON(i915_verify_lists(dev));
3312}
3313
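/**
 * Userspace pin interface: looks up the object by handle, refuses purgeable
 * buffers and buffers pinned by another client, pins the buffer mappable and
 * fenceable on its first user pin, and returns its GTT offset to userspace.
 */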
3314int
3315i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3316                   struct drm_file *file)
3317{
3318        struct drm_i915_gem_pin *args = data;
3319        struct drm_i915_gem_object *obj;
3320        int ret;
3321
3322        ret = i915_mutex_lock_interruptible(dev);
3323        if (ret)
3324                return ret;
3325
3326        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3327        if (obj == NULL) {
3328                ret = -ENOENT;
3329                goto unlock;
3330        }
3331
3332        if (obj->madv != I915_MADV_WILLNEED) {
3333                DRM_ERROR("Attempting to pin a purgeable buffer\n");
3334                ret = -EINVAL;
3335                goto out;
3336        }
3337
3338        if (obj->pin_filp != NULL && obj->pin_filp != file) {
3339                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3340                          args->handle);
3341                ret = -EINVAL;
3342                goto out;
3343        }
3344
3345        obj->user_pin_count++;
3346        obj->pin_filp = file;
3347        if (obj->user_pin_count == 1) {
3348                ret = i915_gem_object_pin(obj, args->alignment, true);
3349                if (ret)
3350                        goto out;
3351        }
3352
3353        /* XXX - flush the CPU caches for pinned objects
3354         * as the X server doesn't manage domains yet
3355         */
3356        i915_gem_object_flush_cpu_write_domain(obj);
3357        args->offset = obj->gtt_offset;
3358out:
3359        drm_gem_object_unreference(&obj->base);
3360unlock:
3361        mutex_unlock(&dev->struct_mutex);
3362        return ret;
3363}
3364
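/**
 * Userspace unpin interface: drops one user pin.  Only the file that pinned
 * the buffer may unpin it; the kernel pin is released when the user pin
 * count reaches zero.
 */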
3365int
3366i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3367                     struct drm_file *file)
3368{
3369        struct drm_i915_gem_pin *args = data;
3370        struct drm_i915_gem_object *obj;
3371        int ret;
3372
3373        ret = i915_mutex_lock_interruptible(dev);
3374        if (ret)
3375                return ret;
3376
3377        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3378        if (obj == NULL) {
3379                ret = -ENOENT;
3380                goto unlock;
3381        }
3382
3383        if (obj->pin_filp != file) {
3384                DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3385                          args->handle);
3386                ret = -EINVAL;
3387                goto out;
3388        }
3389        obj->user_pin_count--;
3390        if (obj->user_pin_count == 0) {
3391                obj->pin_filp = NULL;
3392                i915_gem_object_unpin(obj);
3393        }
3394
3395out:
3396        drm_gem_object_unreference(&obj->base);
3397unlock:
3398        mutex_unlock(&dev->struct_mutex);
3399        return ret;
3400}
3401
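/**
 * Userspace busy query: reports whether the object is still in use by the
 * GPU, flushing or emitting a request as needed so that the object will
 * eventually become idle without further ioctls.
 */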
3402int
3403i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3404                    struct drm_file *file)
3405{
3406        struct drm_i915_gem_busy *args = data;
3407        struct drm_i915_gem_object *obj;
3408        int ret;
3409
3410        ret = i915_mutex_lock_interruptible(dev);
3411        if (ret)
3412                return ret;
3413
3414        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3415        if (obj == NULL) {
3416                ret = -ENOENT;
3417                goto unlock;
3418        }
3419
3420        /* Count all active objects as busy, even if they are currently not used
3421         * by the gpu. Users of this interface expect objects to eventually
3422         * become non-busy without any further actions, therefore emit any
3423         * necessary flushes here.
3424         */
3425        args->busy = obj->active;
3426        if (args->busy) {
3427                /* Unconditionally flush objects, even when the gpu still uses this
3428                 * object. Userspace calling this function indicates that it wants to
3429                 * use this buffer rather sooner than later, so issuing the required
3430                 * flush earlier is beneficial.
3431                 */
3432                if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3433                        ret = i915_gem_flush_ring(dev, obj->ring,
3434                                                  0, obj->base.write_domain);
3435                } else if (obj->ring->outstanding_lazy_request ==
3436                           obj->last_rendering_seqno) {
3437                        struct drm_i915_gem_request *request;
3438
3439                        /* This ring is not being cleared by active usage,
3440                         * so emit a request to do so.
3441                         */
3442                        request = kzalloc(sizeof(*request), GFP_KERNEL);
3443                        if (request)
3444                                ret = i915_add_request(dev,
3445                                                       NULL, request,
3446                                                       obj->ring);
3447                        else
3448                                ret = -ENOMEM;
3449                }
3450
3451                /* Update the active list for the hardware's current position.
3452                 * Otherwise this only updates on a delayed timer or when irqs
3453                 * are actually unmasked, and our working set ends up being
3454                 * larger than required.
3455                 */
3456                i915_gem_retire_requests_ring(dev, obj->ring);
3457
3458                args->busy = obj->active;
3459        }
3460
3461        drm_gem_object_unreference(&obj->base);
3462unlock:
3463        mutex_unlock(&dev->struct_mutex);
3464        return ret;
3465}
3466
3467int
3468i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3469                        struct drm_file *file_priv)
3470{
3471        return i915_gem_ring_throttle(dev, file_priv);
3472}
3473
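/**
 * Userspace madvise interface: marks the backing storage as needed or
 * discardable.  Pinned objects are rejected, and an object that is both
 * purgeable and unbound has its backing storage truncated immediately.
 */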
3474int
3475i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3476                       struct drm_file *file_priv)
3477{
3478        struct drm_i915_gem_madvise *args = data;
3479        struct drm_i915_gem_object *obj;
3480        int ret;
3481
3482        switch (args->madv) {
3483        case I915_MADV_DONTNEED:
3484        case I915_MADV_WILLNEED:
3485                break;
3486        default:
3487                return -EINVAL;
3488        }
3489
3490        ret = i915_mutex_lock_interruptible(dev);
3491        if (ret)
3492                return ret;
3493
3494        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3495        if (obj == NULL) {
3496                ret = -ENOENT;
3497                goto unlock;
3498        }
3499
3500        if (obj->pin_count) {
3501                ret = -EINVAL;
3502                goto out;
3503        }
3504
3505        if (obj->madv != __I915_MADV_PURGED)
3506                obj->madv = args->madv;
3507
3508        /* if the object is no longer bound, discard its backing storage */
3509        if (i915_gem_object_is_purgeable(obj) &&
3510            obj->gtt_space == NULL)
3511                i915_gem_object_truncate(obj);
3512
3513        args->retained = obj->madv != __I915_MADV_PURGED;
3514
3515out:
3516        drm_gem_object_unreference(&obj->base);
3517unlock:
3518        mutex_unlock(&dev->struct_mutex);
3519        return ret;
3520}
3521
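/**
 * Allocates a new GEM object of the requested size, starting life in the
 * CPU domain with no fence register assigned and its backing pages marked
 * as needed.
 */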
3522struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3523                                                  size_t size)
3524{
3525        struct drm_i915_private *dev_priv = dev->dev_private;
3526        struct drm_i915_gem_object *obj;
3527
3528        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3529        if (obj == NULL)
3530                return NULL;
3531
3532        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3533                kfree(obj);
3534                return NULL;
3535        }
3536
3537        i915_gem_info_add_obj(dev_priv, size);
3538
3539        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3540        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3541
3542        obj->agp_type = AGP_USER_MEMORY;
3543        obj->base.driver_private = NULL;
3544        obj->fence_reg = I915_FENCE_REG_NONE;
3545        INIT_LIST_HEAD(&obj->mm_list);
3546        INIT_LIST_HEAD(&obj->gtt_list);
3547        INIT_LIST_HEAD(&obj->ring_list);
3548        INIT_LIST_HEAD(&obj->exec_list);
3549        INIT_LIST_HEAD(&obj->gpu_write_list);
3550        obj->madv = I915_MADV_WILLNEED;
3551        /* Avoid an unnecessary call to unbind on the first bind. */
3552        obj->map_and_fenceable = true;
3553
3554        return obj;
3555}
3556
3557int i915_gem_init_object(struct drm_gem_object *obj)
3558{
3559        BUG();
3560
3561        return 0;
3562}
3563
3564static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3565{
3566        struct drm_device *dev = obj->base.dev;
3567        drm_i915_private_t *dev_priv = dev->dev_private;
3568        int ret;
3569
3570        ret = i915_gem_object_unbind(obj);
3571        if (ret == -ERESTARTSYS) {
3572                list_move(&obj->mm_list,
3573                          &dev_priv->mm.deferred_free_list);
3574                return;
3575        }
3576
3577        if (obj->base.map_list.map)
3578                i915_gem_free_mmap_offset(obj);
3579
3580        drm_gem_object_release(&obj->base);
3581        i915_gem_info_remove_obj(dev_priv, obj->base.size);
3582
3583        kfree(obj->page_cpu_valid);
3584        kfree(obj->bit_17);
3585        kfree(obj);
3586}
3587
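/**
 * Final unreference callback for a GEM object: drops any remaining pins,
 * detaches it from a physical object if necessary, and frees it (deferring
 * the free if the unbind would have to wait on a signal).
 */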
3588void i915_gem_free_object(struct drm_gem_object *gem_obj)
3589{
3590        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3591        struct drm_device *dev = obj->base.dev;
3592
3593        trace_i915_gem_object_destroy(obj);
3594
3595        while (obj->pin_count > 0)
3596                i915_gem_object_unpin(obj);
3597
3598        if (obj->phys_obj)
3599                i915_gem_detach_phys_object(dev, obj);
3600
3601        i915_gem_free_object_tail(obj);
3602}
3603
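/**
 * Quiesces the GPU: waits for outstanding rendering, evicts inactive buffers
 * under UMS, resets the fence registers, marks the device suspended and
 * tears down the rings.  The retire work handler is cancelled once
 * struct_mutex has been dropped.
 */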
3604int
3605i915_gem_idle(struct drm_device *dev)
3606{
3607        drm_i915_private_t *dev_priv = dev->dev_private;
3608        int ret;
3609
3610        mutex_lock(&dev->struct_mutex);
3611
3612        if (dev_priv->mm.suspended) {
3613                mutex_unlock(&dev->struct_mutex);
3614                return 0;
3615        }
3616
3617        ret = i915_gpu_idle(dev);
3618        if (ret) {
3619                mutex_unlock(&dev->struct_mutex);
3620                return ret;
3621        }
3622
3623        /* Under UMS, be paranoid and evict. */
3624        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3625                ret = i915_gem_evict_inactive(dev, false);
3626                if (ret) {
3627                        mutex_unlock(&dev->struct_mutex);
3628                        return ret;
3629                }
3630        }
3631
3632        i915_gem_reset_fences(dev);
3633
3634        /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3635         * We need to replace this with a semaphore, or something.
3636         * And not confound mm.suspended!
3637         */
3638        dev_priv->mm.suspended = 1;
3639        del_timer_sync(&dev_priv->hangcheck_timer);
3640
3641        i915_kernel_lost_context(dev);
3642        i915_gem_cleanup_ringbuffer(dev);
3643
3644        mutex_unlock(&dev->struct_mutex);
3645
3646        /* Cancel the retire work handler, which should be idle now. */
3647        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3648
3649        return 0;
3650}
3651
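/**
 * Initializes the render ring and, where the hardware has them, the BSD and
 * BLT rings, cleaning up the rings already brought up if a later one fails.
 */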
3652int
3653i915_gem_init_ringbuffer(struct drm_device *dev)
3654{
3655        drm_i915_private_t *dev_priv = dev->dev_private;
3656        int ret;
3657
3658        ret = intel_init_render_ring_buffer(dev);
3659        if (ret)
3660                return ret;
3661
3662        if (HAS_BSD(dev)) {
3663                ret = intel_init_bsd_ring_buffer(dev);
3664                if (ret)
3665                        goto cleanup_render_ring;
3666        }
3667
3668        if (HAS_BLT(dev)) {
3669                ret = intel_init_blt_ring_buffer(dev);
3670                if (ret)
3671                        goto cleanup_bsd_ring;
3672        }
3673
3674        dev_priv->next_seqno = 1;
3675
3676        return 0;
3677
3678cleanup_bsd_ring:
3679        intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3680cleanup_render_ring:
3681        intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3682        return ret;
3683}
3684
3685void
3686i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3687{
3688        drm_i915_private_t *dev_priv = dev->dev_private;
3689        int i;
3690
3691        for (i = 0; i < I915_NUM_RINGS; i++)
3692                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3693}
3694
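/**
 * UMS-only VT-enter path: clears any wedged state, reinitializes the rings
 * and installs the interrupt handler.  A no-op under KMS.
 */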
3695int
3696i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3697                       struct drm_file *file_priv)
3698{
3699        drm_i915_private_t *dev_priv = dev->dev_private;
3700        int ret, i;
3701
3702        if (drm_core_check_feature(dev, DRIVER_MODESET))
3703                return 0;
3704
3705        if (atomic_read(&dev_priv->mm.wedged)) {
3706                DRM_ERROR("Reenabling wedged hardware, good luck\n");
3707                atomic_set(&dev_priv->mm.wedged, 0);
3708        }
3709
3710        mutex_lock(&dev->struct_mutex);
3711        dev_priv->mm.suspended = 0;
3712
3713        ret = i915_gem_init_ringbuffer(dev);
3714        if (ret != 0) {
3715                mutex_unlock(&dev->struct_mutex);
3716                return ret;
3717        }
3718
3719        BUG_ON(!list_empty(&dev_priv->mm.active_list));
3720        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3721        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3722        for (i = 0; i < I915_NUM_RINGS; i++) {
3723                BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3724                BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3725        }
3726        mutex_unlock(&dev->struct_mutex);
3727
3728        ret = drm_irq_install(dev);
3729        if (ret)
3730                goto cleanup_ringbuffer;
3731
3732        return 0;
3733
3734cleanup_ringbuffer:
3735        mutex_lock(&dev->struct_mutex);
3736        i915_gem_cleanup_ringbuffer(dev);
3737        dev_priv->mm.suspended = 1;
3738        mutex_unlock(&dev->struct_mutex);
3739
3740        return ret;
3741}
3742
3743int
3744i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3745                       struct drm_file *file_priv)
3746{
3747        if (drm_core_check_feature(dev, DRIVER_MODESET))
3748                return 0;
3749
3750        drm_irq_uninstall(dev);
3751        return i915_gem_idle(dev);
3752}
3753
3754void
3755i915_gem_lastclose(struct drm_device *dev)
3756{
3757        int ret;
3758
3759        if (drm_core_check_feature(dev, DRIVER_MODESET))
3760                return;
3761
3762        ret = i915_gem_idle(dev);
3763        if (ret)
3764                DRM_ERROR("failed to idle hardware: %d\n", ret);
3765}
3766
3767static void
3768init_ring_lists(struct intel_ring_buffer *ring)
3769{
3770        INIT_LIST_HEAD(&ring->active_list);
3771        INIT_LIST_HEAD(&ring->request_list);
3772        INIT_LIST_HEAD(&ring->gpu_write_list);
3773}
3774
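/**
 * One-time GEM setup at driver load: initializes the memory-management
 * lists, the retire work handler, fence register bookkeeping, tiling swizzle
 * detection and the inactive-list shrinker.
 */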
3775void
3776i915_gem_load(struct drm_device *dev)
3777{
3778        int i;
3779        drm_i915_private_t *dev_priv = dev->dev_private;
3780
3781        INIT_LIST_HEAD(&dev_priv->mm.active_list);
3782        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3783        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3784        INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3785        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3786        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3787        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3788        for (i = 0; i < I915_NUM_RINGS; i++)
3789                init_ring_lists(&dev_priv->ring[i]);
3790        for (i = 0; i < 16; i++)
3791                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3792        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3793                          i915_gem_retire_work_handler);
3794        init_completion(&dev_priv->error_completion);
3795
3796        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3797        if (IS_GEN3(dev)) {
3798                u32 tmp = I915_READ(MI_ARB_STATE);
3799                if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3800                        /* arb state is a masked write, so set bit + bit in mask */
3801                        tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3802                        I915_WRITE(MI_ARB_STATE, tmp);
3803                }
3804        }
3805
3806        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3807
3808        /* Old X drivers will take 0-2 for front, back, depth buffers */
3809        if (!drm_core_check_feature(dev, DRIVER_MODESET))
3810                dev_priv->fence_reg_start = 3;
3811
3812        if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3813                dev_priv->num_fence_regs = 16;
3814        else
3815                dev_priv->num_fence_regs = 8;
3816
3817        /* Initialize fence registers to zero */
3818        switch (INTEL_INFO(dev)->gen) {
3819        case 6:
3820                for (i = 0; i < 16; i++)
3821                        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
3822                break;
3823        case 5:
3824        case 4:
3825                for (i = 0; i < 16; i++)
3826                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
3827                break;
3828        case 3:
3829                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3830                        for (i = 0; i < 8; i++)
3831                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
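                /* fall through: gen3 also has the eight 830-style registers */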
3832        case 2:
3833                for (i = 0; i < 8; i++)
3834                        I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
3835                break;
3836        }
3837        i915_gem_detect_bit_6_swizzle(dev);
3838        init_waitqueue_head(&dev_priv->pending_flip_queue);
3839
3840        dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3841        dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3842        register_shrinker(&dev_priv->mm.inactive_shrinker);
3843}
3844
3845/*
3846 * Create a physically contiguous memory object to back a GEM object,
3847 * e.g. for cursor and overlay registers.
3848 */
3849static int i915_gem_init_phys_object(struct drm_device *dev,
3850                                     int id, int size, int align)
3851{
3852        drm_i915_private_t *dev_priv = dev->dev_private;
3853        struct drm_i915_gem_phys_object *phys_obj;
3854        int ret;
3855
3856        if (dev_priv->mm.phys_objs[id - 1] || !size)
3857                return 0;
3858
3859        phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3860        if (!phys_obj)
3861                return -ENOMEM;
3862
3863        phys_obj->id = id;
3864
3865        phys_obj->handle = drm_pci_alloc(dev, size, align);
3866        if (!phys_obj->handle) {
3867                ret = -ENOMEM;
3868                goto kfree_obj;
3869        }
3870#ifdef CONFIG_X86
3871        set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3872#endif
3873
3874        dev_priv->mm.phys_objs[id - 1] = phys_obj;
3875
3876        return 0;
3877kfree_obj:
3878        kfree(phys_obj);
3879        return ret;
3880}
3881
3882static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3883{
3884        drm_i915_private_t *dev_priv = dev->dev_private;
3885        struct drm_i915_gem_phys_object *phys_obj;
3886
3887        if (!dev_priv->mm.phys_objs[id - 1])
3888                return;
3889
3890        phys_obj = dev_priv->mm.phys_objs[id - 1];
3891        if (phys_obj->cur_obj) {
3892                i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3893        }
3894
3895#ifdef CONFIG_X86
3896        set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3897#endif
3898        drm_pci_free(dev, phys_obj->handle);
3899        kfree(phys_obj);
3900        dev_priv->mm.phys_objs[id - 1] = NULL;
3901}
3902
3903void i915_gem_free_all_phys_object(struct drm_device *dev)
3904{
3905        int i;
3906
3907        for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3908                i915_gem_free_phys_object(dev, i);
3909}
3910
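/**
 * Copies the contents of a physically contiguous backing store back into the
 * object's shmem pages and breaks the association with the phys object.
 */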
3911void i915_gem_detach_phys_object(struct drm_device *dev,
3912                                 struct drm_i915_gem_object *obj)
3913{
3914        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3915        char *vaddr;
3916        int i;
3917        int page_count;
3918
3919        if (!obj->phys_obj)
3920                return;
3921        vaddr = obj->phys_obj->handle->vaddr;
3922
3923        page_count = obj->base.size / PAGE_SIZE;
3924        for (i = 0; i < page_count; i++) {
3925                struct page *page = read_cache_page_gfp(mapping, i,
3926                                                        GFP_HIGHUSER | __GFP_RECLAIMABLE);
3927                if (!IS_ERR(page)) {
3928                        char *dst = kmap_atomic(page);
3929                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3930                        kunmap_atomic(dst);
3931
3932                        drm_clflush_pages(&page, 1);
3933
3934                        set_page_dirty(page);
3935                        mark_page_accessed(page);
3936                        page_cache_release(page);
3937                }
3938        }
3939        intel_gtt_chipset_flush();
3940
3941        obj->phys_obj->cur_obj = NULL;
3942        obj->phys_obj = NULL;
3943}
3944
3945int
3946i915_gem_attach_phys_object(struct drm_device *dev,
3947                            struct drm_i915_gem_object *obj,
3948                            int id,
3949                            int align)
3950{
3951        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3952        drm_i915_private_t *dev_priv = dev->dev_private;
3953        int ret = 0;
3954        int page_count;
3955        int i;
3956
3957        if (id > I915_MAX_PHYS_OBJECT)
3958                return -EINVAL;
3959
3960        if (obj->phys_obj) {
3961                if (obj->phys_obj->id == id)
3962                        return 0;
3963                i915_gem_detach_phys_object(dev, obj);
3964        }
3965
3966        /* create a new object */
3967        if (!dev_priv->mm.phys_objs[id - 1]) {
3968                ret = i915_gem_init_phys_object(dev, id,
3969                                                obj->base.size, align);
3970                if (ret) {
3971                        DRM_ERROR("failed to init phys object %d size: %zu\n",
3972                                  id, obj->base.size);
3973                        return ret;
3974                }
3975        }
3976
3977        /* bind to the object */
3978        obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3979        obj->phys_obj->cur_obj = obj;
3980
3981        page_count = obj->base.size / PAGE_SIZE;
3982
3983        for (i = 0; i < page_count; i++) {
3984                struct page *page;
3985                char *dst, *src;
3986
3987                page = read_cache_page_gfp(mapping, i,
3988                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
3989                if (IS_ERR(page))
3990                        return PTR_ERR(page);
3991
3992                src = kmap_atomic(page);
3993                dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3994                memcpy(dst, src, PAGE_SIZE);
3995                kunmap_atomic(src);
3996
3997                mark_page_accessed(page);
3998                page_cache_release(page);
3999        }
4000
4001        return 0;
4002}
4003
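/**
 * pwrite fast path for objects with a physically contiguous backing store:
 * tries an uncached atomic copy first and, if that faults, falls back to a
 * plain copy_from_user with struct_mutex dropped.
 */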
4004static int
4005i915_gem_phys_pwrite(struct drm_device *dev,
4006                     struct drm_i915_gem_object *obj,
4007                     struct drm_i915_gem_pwrite *args,
4008                     struct drm_file *file_priv)
4009{
4010        void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4011        char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4012
4013        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4014                unsigned long unwritten;
4015
4016                /* The physical object once assigned is fixed for the lifetime
4017                 * of the obj, so we can safely drop the lock and continue
4018                 * to access vaddr.
4019                 */
4020                mutex_unlock(&dev->struct_mutex);
4021                unwritten = copy_from_user(vaddr, user_data, args->size);
4022                mutex_lock(&dev->struct_mutex);
4023                if (unwritten)
4024                        return -EFAULT;
4025        }
4026
4027        intel_gtt_chipset_flush();
4028        return 0;
4029}
4030
4031void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4032{
4033        struct drm_i915_file_private *file_priv = file->driver_priv;
4034
4035        /* Clean up our request list when the client is going away, so that
4036         * later retire_requests won't dereference our soon-to-be-gone
4037         * file_priv.
4038         */
4039        spin_lock(&file_priv->mm.lock);
4040        while (!list_empty(&file_priv->mm.request_list)) {
4041                struct drm_i915_gem_request *request;
4042
4043                request = list_first_entry(&file_priv->mm.request_list,
4044                                           struct drm_i915_gem_request,
4045                                           client_list);
4046                list_del(&request->client_list);
4047                request->file_priv = NULL;
4048        }
4049        spin_unlock(&file_priv->mm.lock);
4050}
4051
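/* The GPU still has outstanding work while any objects remain on the active
 * or flushing lists.
 */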
4052static int
4053i915_gpu_is_active(struct drm_device *dev)
4054{
4055        drm_i915_private_t *dev_priv = dev->dev_private;
4056        int lists_empty;
4057
4058        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4059                      list_empty(&dev_priv->mm.active_list);
4060
4061        return !lists_empty;
4062}
4063
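/**
 * Memory-shrinker callback.  With nr_to_scan == 0 it only reports how many
 * inactive objects could be reclaimed; otherwise it unbinds purgeable
 * objects first, then any other inactive objects, and as a last resort
 * waits for the GPU to idle and rescans.
 */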
4064static int
4065i915_gem_inactive_shrink(struct shrinker *shrinker,
4066                         int nr_to_scan,
4067                         gfp_t gfp_mask)
4068{
4069        struct drm_i915_private *dev_priv =
4070                container_of(shrinker,
4071                             struct drm_i915_private,
4072                             mm.inactive_shrinker);
4073        struct drm_device *dev = dev_priv->dev;
4074        struct drm_i915_gem_object *obj, *next;
4075        int cnt;
4076
4077        if (!mutex_trylock(&dev->struct_mutex))
4078                return 0;
4079
4080        /* "fast-path" to count number of available objects */
4081        if (nr_to_scan == 0) {
4082                cnt = 0;
4083                list_for_each_entry(obj,
4084                                    &dev_priv->mm.inactive_list,
4085                                    mm_list)
4086                        cnt++;
4087                mutex_unlock(&dev->struct_mutex);
4088                return cnt / 100 * sysctl_vfs_cache_pressure;
4089        }
4090
4091rescan:
4092        /* first scan for clean buffers */
4093        i915_gem_retire_requests(dev);
4094
4095        list_for_each_entry_safe(obj, next,
4096                                 &dev_priv->mm.inactive_list,
4097                                 mm_list) {
4098                if (i915_gem_object_is_purgeable(obj)) {
4099                        if (i915_gem_object_unbind(obj) == 0 &&
4100                            --nr_to_scan == 0)
4101                                break;
4102                }
4103        }
4104
4105        /* second pass, evict/count anything still on the inactive list */
4106        cnt = 0;
4107        list_for_each_entry_safe(obj, next,
4108                                 &dev_priv->mm.inactive_list,
4109                                 mm_list) {
4110                if (nr_to_scan &&
4111                    i915_gem_object_unbind(obj) == 0)
4112                        nr_to_scan--;
4113                else
4114                        cnt++;
4115        }
4116
4117        if (nr_to_scan && i915_gpu_is_active(dev)) {
4118                /*
4119                 * We are desperate for pages, so as a last resort, wait
4120                 * for the GPU to finish and discard whatever we can.
4121                 * This dramatically reduces the number of OOM-killer
4122                 * events whilst running the GPU aggressively.
4123                 */
4124                if (i915_gpu_idle(dev) == 0)
4125                        goto rescan;
4126        }
4127        mutex_unlock(&dev->struct_mutex);
4128        return cnt / 100 * sysctl_vfs_cache_pressure;
4129}
4130