linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c
   1/*
   2 * Copyright © 2008,2010 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Chris Wilson <chris@chris-wilson.co.uk>
  26 *
  27 */
  28
  29#include <drm/drmP.h>
  30#include <drm/i915_drm.h>
  31#include "i915_drv.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34#include <linux/dma_remapping.h>
  35#include <linux/uaccess.h>
  36
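     /* Internal bookkeeping flags kept in the high bits of exec_entry->flags,
      * above the bits userspace may set via EXEC_OBJECT_*. */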
  37#define  __EXEC_OBJECT_HAS_PIN (1<<31)
  38#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
  39#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
  40#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
  41
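     /* Minimum GTT offset allowed for batch buffers; see the comment in
      * eb_get_batch() for why batches are kept out of the low 256KiB. */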
  42#define BATCH_OFFSET_BIAS (256*1024)
  43
  44struct eb_vmas {
  45        struct list_head vmas;
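             /*
              * and < 0: lut[] is a direct table of -and entries, indexed by the
              * execbuffer index (I915_EXEC_HANDLE_LUT). and >= 0: it is the mask
              * used to hash exec handles into buckets[]. See eb_create() and
              * eb_get_vma().
              */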
  46        int and;
  47        union {
  48                struct i915_vma *lut[0];
  49                struct hlist_head buckets[0];
  50        };
  51};
  52
  53static struct eb_vmas *
  54eb_create(struct drm_i915_gem_execbuffer2 *args)
  55{
  56        struct eb_vmas *eb = NULL;
  57
  58        if (args->flags & I915_EXEC_HANDLE_LUT) {
  59                unsigned size = args->buffer_count;
  60                size *= sizeof(struct i915_vma *);
  61                size += sizeof(struct eb_vmas);
  62                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  63        }
  64
  65        if (eb == NULL) {
  66                unsigned size = args->buffer_count;
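                     /* Fall back to a hash table of exec handles: pick a
                      * power-of-two bucket count of roughly the buffer count,
                      * capped at half a page of hlist heads. */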
  67                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  68                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
  69                while (count > 2*size)
  70                        count >>= 1;
  71                eb = kzalloc(count*sizeof(struct hlist_head) +
  72                             sizeof(struct eb_vmas),
  73                             GFP_TEMPORARY);
  74                if (eb == NULL)
  75                        return eb;
  76
  77                eb->and = count - 1;
  78        } else
  79                eb->and = -args->buffer_count;
  80
  81        INIT_LIST_HEAD(&eb->vmas);
  82        return eb;
  83}
  84
  85static void
  86eb_reset(struct eb_vmas *eb)
  87{
  88        if (eb->and >= 0)
  89                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  90}
  91
  92static int
  93eb_lookup_vmas(struct eb_vmas *eb,
  94               struct drm_i915_gem_exec_object2 *exec,
  95               const struct drm_i915_gem_execbuffer2 *args,
  96               struct i915_address_space *vm,
  97               struct drm_file *file)
  98{
  99        struct drm_i915_gem_object *obj;
 100        struct list_head objects;
 101        int i, ret;
 102
 103        INIT_LIST_HEAD(&objects);
 104        spin_lock(&file->table_lock);
  105        /* Grab a reference to the object and release the lock so we can look up
  106         * or create the VMA without needing GFP_ATOMIC. */
 107        for (i = 0; i < args->buffer_count; i++) {
 108                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 109                if (obj == NULL) {
 110                        spin_unlock(&file->table_lock);
 111                        DRM_DEBUG("Invalid object handle %d at index %d\n",
 112                                   exec[i].handle, i);
 113                        ret = -ENOENT;
 114                        goto err;
 115                }
 116
 117                if (!list_empty(&obj->obj_exec_link)) {
 118                        spin_unlock(&file->table_lock);
 119                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 120                                   obj, exec[i].handle, i);
 121                        ret = -EINVAL;
 122                        goto err;
 123                }
 124
 125                drm_gem_object_reference(&obj->base);
 126                list_add_tail(&obj->obj_exec_link, &objects);
 127        }
 128        spin_unlock(&file->table_lock);
 129
 130        i = 0;
 131        while (!list_empty(&objects)) {
 132                struct i915_vma *vma;
 133
 134                obj = list_first_entry(&objects,
 135                                       struct drm_i915_gem_object,
 136                                       obj_exec_link);
 137
  138                /*
  139                 * NOTE: We can leak any vmas created here if something fails
  140                 * later on. That is not an issue since vma_unbind can deal with
  141                 * vmas which are not actually bound. And since
  142                 * lookup_or_create is the only interface for getting at the vma
  143                 * for a given (obj, vm) pair, we don't risk creating
  144                 * duplicate vmas for the same vm.
  145                 */
 146                vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 147                if (IS_ERR(vma)) {
 148                        DRM_DEBUG("Failed to lookup VMA\n");
 149                        ret = PTR_ERR(vma);
 150                        goto err;
 151                }
 152
 153                /* Transfer ownership from the objects list to the vmas list. */
 154                list_add_tail(&vma->exec_list, &eb->vmas);
 155                list_del_init(&obj->obj_exec_link);
 156
 157                vma->exec_entry = &exec[i];
 158                if (eb->and < 0) {
 159                        eb->lut[i] = vma;
 160                } else {
 161                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
 162                        vma->exec_handle = handle;
 163                        hlist_add_head(&vma->exec_node,
 164                                       &eb->buckets[handle & eb->and]);
 165                }
 166                ++i;
 167        }
 168
 169        return 0;
 170
 171
 172err:
 173        while (!list_empty(&objects)) {
 174                obj = list_first_entry(&objects,
 175                                       struct drm_i915_gem_object,
 176                                       obj_exec_link);
 177                list_del_init(&obj->obj_exec_link);
 178                drm_gem_object_unreference(&obj->base);
 179        }
 180        /*
  181         * Objects already transferred to the vmas list will be unreferenced by
 182         * eb_destroy.
 183         */
 184
 185        return ret;
 186}
 187
 188static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 189{
 190        if (eb->and < 0) {
 191                if (handle >= -eb->and)
 192                        return NULL;
 193                return eb->lut[handle];
 194        } else {
 195                struct hlist_head *head;
 196                struct hlist_node *node;
 197
 198                head = &eb->buckets[handle & eb->and];
 199                hlist_for_each(node, head) {
 200                        struct i915_vma *vma;
 201
 202                        vma = hlist_entry(node, struct i915_vma, exec_node);
 203                        if (vma->exec_handle == handle)
 204                                return vma;
 205                }
 206                return NULL;
 207        }
 208}
 209
 210static void
 211i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 212{
 213        struct drm_i915_gem_exec_object2 *entry;
 214        struct drm_i915_gem_object *obj = vma->obj;
 215
 216        if (!drm_mm_node_allocated(&vma->node))
 217                return;
 218
 219        entry = vma->exec_entry;
 220
 221        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 222                i915_gem_object_unpin_fence(obj);
 223
 224        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 225                vma->pin_count--;
 226
 227        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 228}
 229
 230static void eb_destroy(struct eb_vmas *eb)
 231{
 232        while (!list_empty(&eb->vmas)) {
 233                struct i915_vma *vma;
 234
 235                vma = list_first_entry(&eb->vmas,
 236                                       struct i915_vma,
 237                                       exec_list);
 238                list_del_init(&vma->exec_list);
 239                i915_gem_execbuffer_unreserve_vma(vma);
 240                drm_gem_object_unreference(&vma->obj->base);
 241        }
 242        kfree(eb);
 243}
 244
 245static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 246{
 247        return (HAS_LLC(obj->base.dev) ||
 248                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 249                obj->cache_level != I915_CACHE_NONE);
 250}
 251
 252static int
 253relocate_entry_cpu(struct drm_i915_gem_object *obj,
 254                   struct drm_i915_gem_relocation_entry *reloc,
 255                   uint64_t target_offset)
 256{
 257        struct drm_device *dev = obj->base.dev;
 258        uint32_t page_offset = offset_in_page(reloc->offset);
 259        uint64_t delta = reloc->delta + target_offset;
 260        char *vaddr;
 261        int ret;
 262
 263        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 264        if (ret)
 265                return ret;
 266
 267        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 268                                reloc->offset >> PAGE_SHIFT));
 269        *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 270
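             /* On gen8+ a relocation is 64 bits wide: also write the upper dword,
              * remapping to the next page if it crosses a page boundary. */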
 271        if (INTEL_INFO(dev)->gen >= 8) {
 272                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 273
 274                if (page_offset == 0) {
 275                        kunmap_atomic(vaddr);
 276                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 277                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 278                }
 279
 280                *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
 281        }
 282
 283        kunmap_atomic(vaddr);
 284
 285        return 0;
 286}
 287
 288static int
 289relocate_entry_gtt(struct drm_i915_gem_object *obj,
 290                   struct drm_i915_gem_relocation_entry *reloc,
 291                   uint64_t target_offset)
 292{
 293        struct drm_device *dev = obj->base.dev;
 294        struct drm_i915_private *dev_priv = dev->dev_private;
 295        uint64_t delta = reloc->delta + target_offset;
 296        uint64_t offset;
 297        void __iomem *reloc_page;
 298        int ret;
 299
 300        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 301        if (ret)
 302                return ret;
 303
 304        ret = i915_gem_object_put_fence(obj);
 305        if (ret)
 306                return ret;
 307
 308        /* Map the page containing the relocation we're going to perform.  */
 309        offset = i915_gem_obj_ggtt_offset(obj);
 310        offset += reloc->offset;
 311        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 312                                              offset & PAGE_MASK);
 313        iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 314
 315        if (INTEL_INFO(dev)->gen >= 8) {
 316                offset += sizeof(uint32_t);
 317
 318                if (offset_in_page(offset) == 0) {
 319                        io_mapping_unmap_atomic(reloc_page);
 320                        reloc_page =
 321                                io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 322                                                         offset);
 323                }
 324
 325                iowrite32(upper_32_bits(delta),
 326                          reloc_page + offset_in_page(offset));
 327        }
 328
 329        io_mapping_unmap_atomic(reloc_page);
 330
 331        return 0;
 332}
 333
 334static void
 335clflush_write32(void *addr, uint32_t value)
 336{
 337        /* This is not a fast path, so KISS. */
 338        drm_clflush_virt_range(addr, sizeof(uint32_t));
 339        *(uint32_t *)addr = value;
 340        drm_clflush_virt_range(addr, sizeof(uint32_t));
 341}
 342
 343static int
 344relocate_entry_clflush(struct drm_i915_gem_object *obj,
 345                       struct drm_i915_gem_relocation_entry *reloc,
 346                       uint64_t target_offset)
 347{
 348        struct drm_device *dev = obj->base.dev;
 349        uint32_t page_offset = offset_in_page(reloc->offset);
 350        uint64_t delta = (int)reloc->delta + target_offset;
 351        char *vaddr;
 352        int ret;
 353
 354        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 355        if (ret)
 356                return ret;
 357
 358        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 359                                reloc->offset >> PAGE_SHIFT));
 360        clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 361
 362        if (INTEL_INFO(dev)->gen >= 8) {
 363                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 364
 365                if (page_offset == 0) {
 366                        kunmap_atomic(vaddr);
 367                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 368                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 369                }
 370
 371                clflush_write32(vaddr + page_offset, upper_32_bits(delta));
 372        }
 373
 374        kunmap_atomic(vaddr);
 375
 376        return 0;
 377}
 378
 379static int
 380i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 381                                   struct eb_vmas *eb,
 382                                   struct drm_i915_gem_relocation_entry *reloc)
 383{
 384        struct drm_device *dev = obj->base.dev;
 385        struct drm_gem_object *target_obj;
 386        struct drm_i915_gem_object *target_i915_obj;
 387        struct i915_vma *target_vma;
 388        uint64_t target_offset;
 389        int ret;
 390
  391        /* We already hold a reference to all valid objects. */
 392        target_vma = eb_get_vma(eb, reloc->target_handle);
 393        if (unlikely(target_vma == NULL))
 394                return -ENOENT;
 395        target_i915_obj = target_vma->obj;
 396        target_obj = &target_vma->obj->base;
 397
 398        target_offset = target_vma->node.start;
 399
  400        /* Sandybridge PPGTT erratum: we need a global gtt mapping for MI and
  401         * pipe_control writes because the gpu doesn't properly redirect them
  402         * through the ppgtt for non-secure batchbuffers. */
 403        if (unlikely(IS_GEN6(dev) &&
 404            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 405                ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 406                                    PIN_GLOBAL);
 407                if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
 408                        return ret;
 409        }
 410
 411        /* Validate that the target is in a valid r/w GPU domain */
 412        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 413                DRM_DEBUG("reloc with multiple write domains: "
 414                          "obj %p target %d offset %d "
 415                          "read %08x write %08x",
 416                          obj, reloc->target_handle,
 417                          (int) reloc->offset,
 418                          reloc->read_domains,
 419                          reloc->write_domain);
 420                return -EINVAL;
 421        }
 422        if (unlikely((reloc->write_domain | reloc->read_domains)
 423                     & ~I915_GEM_GPU_DOMAINS)) {
 424                DRM_DEBUG("reloc with read/write non-GPU domains: "
 425                          "obj %p target %d offset %d "
 426                          "read %08x write %08x",
 427                          obj, reloc->target_handle,
 428                          (int) reloc->offset,
 429                          reloc->read_domains,
 430                          reloc->write_domain);
 431                return -EINVAL;
 432        }
 433
 434        target_obj->pending_read_domains |= reloc->read_domains;
 435        target_obj->pending_write_domain |= reloc->write_domain;
 436
 437        /* If the relocation already has the right value in it, no
 438         * more work needs to be done.
 439         */
 440        if (target_offset == reloc->presumed_offset)
 441                return 0;
 442
 443        /* Check that the relocation address is valid... */
 444        if (unlikely(reloc->offset >
 445                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 446                DRM_DEBUG("Relocation beyond object bounds: "
 447                          "obj %p target %d offset %d size %d.\n",
 448                          obj, reloc->target_handle,
 449                          (int) reloc->offset,
 450                          (int) obj->base.size);
 451                return -EINVAL;
 452        }
 453        if (unlikely(reloc->offset & 3)) {
 454                DRM_DEBUG("Relocation not 4-byte aligned: "
 455                          "obj %p target %d offset %d.\n",
 456                          obj, reloc->target_handle,
 457                          (int) reloc->offset);
 458                return -EINVAL;
 459        }
 460
 461        /* We can't wait for rendering with pagefaults disabled */
 462        if (obj->active && pagefault_disabled())
 463                return -EFAULT;
 464
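             /* Pick the cheapest way to write the relocation: via the CPU mapping
              * when it is coherent, via the GTT aperture when the object is
              * mappable, otherwise with a clflushed CPU write. */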
 465        if (use_cpu_reloc(obj))
 466                ret = relocate_entry_cpu(obj, reloc, target_offset);
 467        else if (obj->map_and_fenceable)
 468                ret = relocate_entry_gtt(obj, reloc, target_offset);
 469        else if (cpu_has_clflush)
 470                ret = relocate_entry_clflush(obj, reloc, target_offset);
 471        else {
 472                WARN_ONCE(1, "Impossible case in relocation handling\n");
 473                ret = -ENODEV;
 474        }
 475
 476        if (ret)
 477                return ret;
 478
 479        /* and update the user's relocation entry */
 480        reloc->presumed_offset = target_offset;
 481
 482        return 0;
 483}
 484
 485static int
 486i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 487                                 struct eb_vmas *eb)
 488{
 489#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 490        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 491        struct drm_i915_gem_relocation_entry __user *user_relocs;
 492        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 493        int remain, ret;
 494
 495        user_relocs = to_user_ptr(entry->relocs_ptr);
 496
 497        remain = entry->relocation_count;
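             /* Our caller has disabled pagefaults, so copy the relocations onto the
              * stack in small chunks using the inatomic helpers; a fault here bails
              * out with -EFAULT and execbuffer falls back to the slow path. */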
 498        while (remain) {
 499                struct drm_i915_gem_relocation_entry *r = stack_reloc;
 500                int count = remain;
 501                if (count > ARRAY_SIZE(stack_reloc))
 502                        count = ARRAY_SIZE(stack_reloc);
 503                remain -= count;
 504
 505                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
 506                        return -EFAULT;
 507
 508                do {
 509                        u64 offset = r->presumed_offset;
 510
 511                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
 512                        if (ret)
 513                                return ret;
 514
 515                        if (r->presumed_offset != offset &&
 516                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
 517                                                    &r->presumed_offset,
 518                                                    sizeof(r->presumed_offset))) {
 519                                return -EFAULT;
 520                        }
 521
 522                        user_relocs++;
 523                        r++;
 524                } while (--count);
 525        }
 526
 527        return 0;
 528#undef N_RELOC
 529}
 530
 531static int
 532i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 533                                      struct eb_vmas *eb,
 534                                      struct drm_i915_gem_relocation_entry *relocs)
 535{
 536        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 537        int i, ret;
 538
 539        for (i = 0; i < entry->relocation_count; i++) {
 540                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
 541                if (ret)
 542                        return ret;
 543        }
 544
 545        return 0;
 546}
 547
 548static int
 549i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 550{
 551        struct i915_vma *vma;
 552        int ret = 0;
 553
  554        /* This is the fast path and we cannot handle a pagefault whilst
  555         * holding the struct mutex lest the user pass in the relocations
  556         * contained within a mmapped bo. In such a case the page
  557         * fault handler would call i915_gem_fault() and we would try to
  558         * acquire the struct mutex again. Obviously this is bad and so
  559         * lockdep complains vehemently.
  560         */
 561        pagefault_disable();
 562        list_for_each_entry(vma, &eb->vmas, exec_list) {
 563                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 564                if (ret)
 565                        break;
 566        }
 567        pagefault_enable();
 568
 569        return ret;
 570}
 571
 572static bool only_mappable_for_reloc(unsigned int flags)
 573{
 574        return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
 575                __EXEC_OBJECT_NEEDS_MAP;
 576}
 577
 578static int
 579i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 580                                struct intel_engine_cs *ring,
 581                                bool *need_reloc)
 582{
 583        struct drm_i915_gem_object *obj = vma->obj;
 584        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 585        uint64_t flags;
 586        int ret;
 587
 588        flags = PIN_USER;
 589        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 590                flags |= PIN_GLOBAL;
 591
 592        if (!drm_mm_node_allocated(&vma->node)) {
 593                if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 594                        flags |= PIN_GLOBAL | PIN_MAPPABLE;
 595                if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
 596                        flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 597        }
 598
 599        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 600        if ((ret == -ENOSPC  || ret == -E2BIG) &&
 601            only_mappable_for_reloc(entry->flags))
 602                ret = i915_gem_object_pin(obj, vma->vm,
 603                                          entry->alignment,
 604                                          flags & ~PIN_MAPPABLE);
 605        if (ret)
 606                return ret;
 607
 608        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 609
 610        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 611                ret = i915_gem_object_get_fence(obj);
 612                if (ret)
 613                        return ret;
 614
 615                if (i915_gem_object_pin_fence(obj))
 616                        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 617        }
 618
 619        if (entry->offset != vma->node.start) {
 620                entry->offset = vma->node.start;
 621                *need_reloc = true;
 622        }
 623
 624        if (entry->flags & EXEC_OBJECT_WRITE) {
 625                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
 626                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 627        }
 628
 629        return 0;
 630}
 631
 632static bool
 633need_reloc_mappable(struct i915_vma *vma)
 634{
 635        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 636
 637        if (entry->relocation_count == 0)
 638                return false;
 639
 640        if (!i915_is_ggtt(vma->vm))
 641                return false;
 642
 643        /* See also use_cpu_reloc() */
 644        if (HAS_LLC(vma->obj->base.dev))
 645                return false;
 646
 647        if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 648                return false;
 649
 650        return true;
 651}
 652
 653static bool
 654eb_vma_misplaced(struct i915_vma *vma)
 655{
 656        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 657        struct drm_i915_gem_object *obj = vma->obj;
 658
 659        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 660               !i915_is_ggtt(vma->vm));
 661
 662        if (entry->alignment &&
 663            vma->node.start & (entry->alignment - 1))
 664                return true;
 665
 666        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
 667            vma->node.start < BATCH_OFFSET_BIAS)
 668                return true;
 669
  670        /* Avoid costly ping-pong once a batch bo has ended up non-mappable. */
 671        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
 672                return !only_mappable_for_reloc(entry->flags);
 673
 674        return false;
 675}
 676
 677static int
 678i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 679                            struct list_head *vmas,
 680                            bool *need_relocs)
 681{
 682        struct drm_i915_gem_object *obj;
 683        struct i915_vma *vma;
 684        struct i915_address_space *vm;
 685        struct list_head ordered_vmas;
 686        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 687        int retry;
 688
 689        i915_gem_retire_requests_ring(ring);
 690
 691        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 692
 693        INIT_LIST_HEAD(&ordered_vmas);
 694        while (!list_empty(vmas)) {
 695                struct drm_i915_gem_exec_object2 *entry;
 696                bool need_fence, need_mappable;
 697
 698                vma = list_first_entry(vmas, struct i915_vma, exec_list);
 699                obj = vma->obj;
 700                entry = vma->exec_entry;
 701
 702                if (!has_fenced_gpu_access)
 703                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 704                need_fence =
 705                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 706                        obj->tiling_mode != I915_TILING_NONE;
 707                need_mappable = need_fence || need_reloc_mappable(vma);
 708
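                     /* Bind objects that must live in the mappable aperture first,
                      * so that they get first claim on the limited mappable space. */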
 709                if (need_mappable) {
 710                        entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 711                        list_move(&vma->exec_list, &ordered_vmas);
 712                } else
 713                        list_move_tail(&vma->exec_list, &ordered_vmas);
 714
 715                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 716                obj->base.pending_write_domain = 0;
 717        }
 718        list_splice(&ordered_vmas, vmas);
 719
 720        /* Attempt to pin all of the buffers into the GTT.
 721         * This is done in 3 phases:
 722         *
 723         * 1a. Unbind all objects that do not match the GTT constraints for
 724         *     the execbuffer (fenceable, mappable, alignment etc).
 725         * 1b. Increment pin count for already bound objects.
 726         * 2.  Bind new objects.
 727         * 3.  Decrement pin count.
 728         *
  729         * This avoids unnecessary unbinding of later objects in order to make
 730         * room for the earlier objects *unless* we need to defragment.
 731         */
 732        retry = 0;
 733        do {
 734                int ret = 0;
 735
 736                /* Unbind any ill-fitting objects or pin. */
 737                list_for_each_entry(vma, vmas, exec_list) {
 738                        if (!drm_mm_node_allocated(&vma->node))
 739                                continue;
 740
 741                        if (eb_vma_misplaced(vma))
 742                                ret = i915_vma_unbind(vma);
 743                        else
 744                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 745                        if (ret)
 746                                goto err;
 747                }
 748
 749                /* Bind fresh objects */
 750                list_for_each_entry(vma, vmas, exec_list) {
 751                        if (drm_mm_node_allocated(&vma->node))
 752                                continue;
 753
 754                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 755                        if (ret)
 756                                goto err;
 757                }
 758
 759err:
 760                if (ret != -ENOSPC || retry++)
 761                        return ret;
 762
 763                /* Decrement pin count for bound objects */
 764                list_for_each_entry(vma, vmas, exec_list)
 765                        i915_gem_execbuffer_unreserve_vma(vma);
 766
 767                ret = i915_gem_evict_vm(vm, true);
 768                if (ret)
 769                        return ret;
 770        } while (1);
 771}
 772
 773static int
 774i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 775                                  struct drm_i915_gem_execbuffer2 *args,
 776                                  struct drm_file *file,
 777                                  struct intel_engine_cs *ring,
 778                                  struct eb_vmas *eb,
 779                                  struct drm_i915_gem_exec_object2 *exec)
 780{
 781        struct drm_i915_gem_relocation_entry *reloc;
 782        struct i915_address_space *vm;
 783        struct i915_vma *vma;
 784        bool need_relocs;
 785        int *reloc_offset;
 786        int i, total, ret;
 787        unsigned count = args->buffer_count;
 788
 789        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 790
 791        /* We may process another execbuffer during the unlock... */
 792        while (!list_empty(&eb->vmas)) {
 793                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 794                list_del_init(&vma->exec_list);
 795                i915_gem_execbuffer_unreserve_vma(vma);
 796                drm_gem_object_unreference(&vma->obj->base);
 797        }
 798
 799        mutex_unlock(&dev->struct_mutex);
 800
 801        total = 0;
 802        for (i = 0; i < count; i++)
 803                total += exec[i].relocation_count;
 804
 805        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 806        reloc = drm_malloc_ab(total, sizeof(*reloc));
 807        if (reloc == NULL || reloc_offset == NULL) {
 808                drm_free_large(reloc);
 809                drm_free_large(reloc_offset);
 810                mutex_lock(&dev->struct_mutex);
 811                return -ENOMEM;
 812        }
 813
 814        total = 0;
 815        for (i = 0; i < count; i++) {
 816                struct drm_i915_gem_relocation_entry __user *user_relocs;
 817                u64 invalid_offset = (u64)-1;
 818                int j;
 819
 820                user_relocs = to_user_ptr(exec[i].relocs_ptr);
 821
 822                if (copy_from_user(reloc+total, user_relocs,
 823                                   exec[i].relocation_count * sizeof(*reloc))) {
 824                        ret = -EFAULT;
 825                        mutex_lock(&dev->struct_mutex);
 826                        goto err;
 827                }
 828
 829                /* As we do not update the known relocation offsets after
 830                 * relocating (due to the complexities in lock handling),
 831                 * we need to mark them as invalid now so that we force the
 832                 * relocation processing next time. Just in case the target
 833                 * object is evicted and then rebound into its old
 834                 * presumed_offset before the next execbuffer - if that
 835                 * happened we would make the mistake of assuming that the
 836                 * relocations were valid.
 837                 */
 838                for (j = 0; j < exec[i].relocation_count; j++) {
 839                        if (__copy_to_user(&user_relocs[j].presumed_offset,
 840                                           &invalid_offset,
 841                                           sizeof(invalid_offset))) {
 842                                ret = -EFAULT;
 843                                mutex_lock(&dev->struct_mutex);
 844                                goto err;
 845                        }
 846                }
 847
 848                reloc_offset[i] = total;
 849                total += exec[i].relocation_count;
 850        }
 851
 852        ret = i915_mutex_lock_interruptible(dev);
 853        if (ret) {
 854                mutex_lock(&dev->struct_mutex);
 855                goto err;
 856        }
 857
 858        /* reacquire the objects */
 859        eb_reset(eb);
 860        ret = eb_lookup_vmas(eb, exec, args, vm, file);
 861        if (ret)
 862                goto err;
 863
 864        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
 865        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 866        if (ret)
 867                goto err;
 868
 869        list_for_each_entry(vma, &eb->vmas, exec_list) {
 870                int offset = vma->exec_entry - exec;
 871                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
 872                                                            reloc + reloc_offset[offset]);
 873                if (ret)
 874                        goto err;
 875        }
 876
  877        /* Leave the user relocations as they are; this is the painfully slow path,
 878         * and we want to avoid the complication of dropping the lock whilst
 879         * having buffers reserved in the aperture and so causing spurious
 880         * ENOSPC for random operations.
 881         */
 882
 883err:
 884        drm_free_large(reloc);
 885        drm_free_large(reloc_offset);
 886        return ret;
 887}
 888
 889static int
 890i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 891                                struct list_head *vmas)
 892{
 893        const unsigned other_rings = ~intel_ring_flag(ring);
 894        struct i915_vma *vma;
 895        uint32_t flush_domains = 0;
 896        bool flush_chipset = false;
 897        int ret;
 898
 899        list_for_each_entry(vma, vmas, exec_list) {
 900                struct drm_i915_gem_object *obj = vma->obj;
 901
 902                if (obj->active & other_rings) {
 903                        ret = i915_gem_object_sync(obj, ring);
 904                        if (ret)
 905                                return ret;
 906                }
 907
 908                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 909                        flush_chipset |= i915_gem_clflush_object(obj, false);
 910
 911                flush_domains |= obj->base.write_domain;
 912        }
 913
 914        if (flush_chipset)
 915                i915_gem_chipset_flush(ring->dev);
 916
 917        if (flush_domains & I915_GEM_DOMAIN_GTT)
 918                wmb();
 919
 920        /* Unconditionally invalidate gpu caches and ensure that we do flush
 921         * any residual writes from the previous batch.
 922         */
 923        return intel_ring_invalidate_all_caches(ring);
 924}
 925
 926static bool
 927i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 928{
 929        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
 930                return false;
 931
 932        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
 933}
 934
 935static int
 936validate_exec_list(struct drm_device *dev,
 937                   struct drm_i915_gem_exec_object2 *exec,
 938                   int count)
 939{
 940        unsigned relocs_total = 0;
 941        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 942        unsigned invalid_flags;
 943        int i;
 944
 945        invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 946        if (USES_FULL_PPGTT(dev))
 947                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 948
 949        for (i = 0; i < count; i++) {
 950                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 951                int length; /* limited by fault_in_pages_readable() */
 952
 953                if (exec[i].flags & invalid_flags)
 954                        return -EINVAL;
 955
 956                /* First check for malicious input causing overflow in
 957                 * the worst case where we need to allocate the entire
 958                 * relocation tree as a single array.
 959                 */
 960                if (exec[i].relocation_count > relocs_max - relocs_total)
 961                        return -EINVAL;
 962                relocs_total += exec[i].relocation_count;
 963
 964                length = exec[i].relocation_count *
 965                        sizeof(struct drm_i915_gem_relocation_entry);
 966                /*
 967                 * We must check that the entire relocation array is safe
 968                 * to read, but since we may need to update the presumed
 969                 * offsets during execution, check for full write access.
 970                 */
 971                if (!access_ok(VERIFY_WRITE, ptr, length))
 972                        return -EFAULT;
 973
 974                if (likely(!i915.prefault_disable)) {
 975                        if (fault_in_multipages_readable(ptr, length))
 976                                return -EFAULT;
 977                }
 978        }
 979
 980        return 0;
 981}
 982
 983static struct intel_context *
 984i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 985                          struct intel_engine_cs *ring, const u32 ctx_id)
 986{
 987        struct intel_context *ctx = NULL;
 988        struct i915_ctx_hang_stats *hs;
 989
 990        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 991                return ERR_PTR(-EINVAL);
 992
 993        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
 994        if (IS_ERR(ctx))
 995                return ctx;
 996
 997        hs = &ctx->hang_stats;
 998        if (hs->banned) {
 999                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1000                return ERR_PTR(-EIO);
1001        }
1002
1003        if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1004                int ret = intel_lr_context_deferred_create(ctx, ring);
1005                if (ret) {
1006                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1007                        return ERR_PTR(ret);
1008                }
1009        }
1010
1011        return ctx;
1012}
1013
1014void
1015i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1016                                   struct intel_engine_cs *ring)
1017{
1018        struct drm_i915_gem_request *req = intel_ring_get_request(ring);
1019        struct i915_vma *vma;
1020
1021        list_for_each_entry(vma, vmas, exec_list) {
1022                struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1023                struct drm_i915_gem_object *obj = vma->obj;
1024                u32 old_read = obj->base.read_domains;
1025                u32 old_write = obj->base.write_domain;
1026
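                     /* If the batch does not write the object, carry the existing
                      * read domains over alongside any new ones. */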
1027                obj->base.write_domain = obj->base.pending_write_domain;
1028                if (obj->base.write_domain == 0)
1029                        obj->base.pending_read_domains |= obj->base.read_domains;
1030                obj->base.read_domains = obj->base.pending_read_domains;
1031
1032                i915_vma_move_to_active(vma, ring);
1033                if (obj->base.write_domain) {
1034                        obj->dirty = 1;
1035                        i915_gem_request_assign(&obj->last_write_req, req);
1036
1037                        intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
1038
1039                        /* update for the implicit flush after a batch */
1040                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1041                }
1042                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1043                        i915_gem_request_assign(&obj->last_fenced_req, req);
1044                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1045                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
1046                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1047                                               &dev_priv->mm.fence_list);
1048                        }
1049                }
1050
1051                trace_i915_gem_object_change_domain(obj, old_read, old_write);
1052        }
1053}
1054
1055void
1056i915_gem_execbuffer_retire_commands(struct drm_device *dev,
1057                                    struct drm_file *file,
1058                                    struct intel_engine_cs *ring,
1059                                    struct drm_i915_gem_object *obj)
1060{
1061        /* Unconditionally force add_request to emit a full flush. */
1062        ring->gpu_caches_dirty = true;
1063
1064        /* Add a breadcrumb for the completion of the batch buffer */
1065        (void)__i915_add_request(ring, file, obj);
1066}
1067
1068static int
1069i915_reset_gen7_sol_offsets(struct drm_device *dev,
1070                            struct intel_engine_cs *ring)
1071{
1072        struct drm_i915_private *dev_priv = dev->dev_private;
1073        int ret, i;
1074
1075        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1076                DRM_DEBUG("sol reset is gen7/rcs only\n");
1077                return -EINVAL;
1078        }
1079
1080        ret = intel_ring_begin(ring, 4 * 3);
1081        if (ret)
1082                return ret;
1083
1084        for (i = 0; i < 4; i++) {
1085                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1086                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1087                intel_ring_emit(ring, 0);
1088        }
1089
1090        intel_ring_advance(ring);
1091
1092        return 0;
1093}
1094
1095static int
1096i915_emit_box(struct intel_engine_cs *ring,
1097              struct drm_clip_rect *box,
1098              int DR1, int DR4)
1099{
1100        int ret;
1101
1102        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
1103            box->y2 <= 0 || box->x2 <= 0) {
1104                DRM_ERROR("Bad box %d,%d..%d,%d\n",
1105                          box->x1, box->y1, box->x2, box->y2);
1106                return -EINVAL;
1107        }
1108
1109        if (INTEL_INFO(ring->dev)->gen >= 4) {
1110                ret = intel_ring_begin(ring, 4);
1111                if (ret)
1112                        return ret;
1113
1114                intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
1115                intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1116                intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1117                intel_ring_emit(ring, DR4);
1118        } else {
1119                ret = intel_ring_begin(ring, 6);
1120                if (ret)
1121                        return ret;
1122
1123                intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
1124                intel_ring_emit(ring, DR1);
1125                intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
1126                intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
1127                intel_ring_emit(ring, DR4);
1128                intel_ring_emit(ring, 0);
1129        }
1130        intel_ring_advance(ring);
1131
1132        return 0;
1133}
1134
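     /*
      * Copy the user batch into an object from the engine's batch pool and run it
      * through the command parser. On success the shadow object is pinned, added
      * to the eb list, and is what the caller should execute instead of the
      * original batch.
      */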
1135static struct drm_i915_gem_object*
1136i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1137                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1138                          struct eb_vmas *eb,
1139                          struct drm_i915_gem_object *batch_obj,
1140                          u32 batch_start_offset,
1141                          u32 batch_len,
1142                          bool is_master)
1143{
1144        struct drm_i915_gem_object *shadow_batch_obj;
1145        struct i915_vma *vma;
1146        int ret;
1147
1148        shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
1149                                                   PAGE_ALIGN(batch_len));
1150        if (IS_ERR(shadow_batch_obj))
1151                return shadow_batch_obj;
1152
1153        ret = i915_parse_cmds(ring,
1154                              batch_obj,
1155                              shadow_batch_obj,
1156                              batch_start_offset,
1157                              batch_len,
1158                              is_master);
1159        if (ret)
1160                goto err;
1161
1162        ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1163        if (ret)
1164                goto err;
1165
1166        i915_gem_object_unpin_pages(shadow_batch_obj);
1167
1168        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1169
1170        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1171        vma->exec_entry = shadow_exec_entry;
1172        vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1173        drm_gem_object_reference(&shadow_batch_obj->base);
1174        list_add_tail(&vma->exec_list, &eb->vmas);
1175
1176        shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1177
1178        return shadow_batch_obj;
1179
1180err:
1181        i915_gem_object_unpin_pages(shadow_batch_obj);
1182        if (ret == -EACCES) /* unhandled chained batch */
1183                return batch_obj;
1184        else
1185                return ERR_PTR(ret);
1186}
1187
1188int
1189i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1190                               struct intel_engine_cs *ring,
1191                               struct intel_context *ctx,
1192                               struct drm_i915_gem_execbuffer2 *args,
1193                               struct list_head *vmas,
1194                               struct drm_i915_gem_object *batch_obj,
1195                               u64 exec_start, u32 dispatch_flags)
1196{
1197        struct drm_clip_rect *cliprects = NULL;
1198        struct drm_i915_private *dev_priv = dev->dev_private;
1199        u64 exec_len;
1200        int instp_mode;
1201        u32 instp_mask;
1202        int i, ret = 0;
1203
1204        if (args->num_cliprects != 0) {
1205                if (ring != &dev_priv->ring[RCS]) {
1206                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1207                        return -EINVAL;
1208                }
1209
1210                if (INTEL_INFO(dev)->gen >= 5) {
1211                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1212                        return -EINVAL;
1213                }
1214
1215                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1216                        DRM_DEBUG("execbuf with %u cliprects\n",
1217                                  args->num_cliprects);
1218                        return -EINVAL;
1219                }
1220
1221                cliprects = kcalloc(args->num_cliprects,
1222                                    sizeof(*cliprects),
1223                                    GFP_KERNEL);
1224                if (cliprects == NULL) {
1225                        ret = -ENOMEM;
1226                        goto error;
1227                }
1228
1229                if (copy_from_user(cliprects,
1230                                   to_user_ptr(args->cliprects_ptr),
1231                                   sizeof(*cliprects)*args->num_cliprects)) {
1232                        ret = -EFAULT;
1233                        goto error;
1234                }
1235        } else {
1236                if (args->DR4 == 0xffffffff) {
1237                        DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1238                        args->DR4 = 0;
1239                }
1240
1241                if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1242                        DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1243                        return -EINVAL;
1244                }
1245        }
1246
1247        ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
1248        if (ret)
1249                goto error;
1250
1251        ret = i915_switch_context(ring, ctx);
1252        if (ret)
1253                goto error;
1254
1255        WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1256             "%s didn't clear reload\n", ring->name);
1257
1258        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1259        instp_mask = I915_EXEC_CONSTANTS_MASK;
1260        switch (instp_mode) {
1261        case I915_EXEC_CONSTANTS_REL_GENERAL:
1262        case I915_EXEC_CONSTANTS_ABSOLUTE:
1263        case I915_EXEC_CONSTANTS_REL_SURFACE:
1264                if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1265                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1266                        ret = -EINVAL;
1267                        goto error;
1268                }
1269
1270                if (instp_mode != dev_priv->relative_constants_mode) {
1271                        if (INTEL_INFO(dev)->gen < 4) {
1272                                DRM_DEBUG("no rel constants on pre-gen4\n");
1273                                ret = -EINVAL;
1274                                goto error;
1275                        }
1276
1277                        if (INTEL_INFO(dev)->gen > 5 &&
1278                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1279                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1280                                ret = -EINVAL;
1281                                goto error;
1282                        }
1283
 1284                        /* The HW changed the meaning of this bit on gen6 */
1285                        if (INTEL_INFO(dev)->gen >= 6)
1286                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1287                }
1288                break;
1289        default:
1290                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1291                ret = -EINVAL;
1292                goto error;
1293        }
1294
1295        if (ring == &dev_priv->ring[RCS] &&
1296                        instp_mode != dev_priv->relative_constants_mode) {
1297                ret = intel_ring_begin(ring, 4);
1298                if (ret)
1299                        goto error;
1300
1301                intel_ring_emit(ring, MI_NOOP);
1302                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1303                intel_ring_emit(ring, INSTPM);
1304                intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1305                intel_ring_advance(ring);
1306
1307                dev_priv->relative_constants_mode = instp_mode;
1308        }
1309
1310        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1311                ret = i915_reset_gen7_sol_offsets(dev, ring);
1312                if (ret)
1313                        goto error;
1314        }
1315
1316        exec_len = args->batch_len;
1317        if (cliprects) {
1318                for (i = 0; i < args->num_cliprects; i++) {
1319                        ret = i915_emit_box(ring, &cliprects[i],
1320                                            args->DR1, args->DR4);
1321                        if (ret)
1322                                goto error;
1323
1324                        ret = ring->dispatch_execbuffer(ring,
1325                                                        exec_start, exec_len,
1326                                                        dispatch_flags);
1327                        if (ret)
1328                                goto error;
1329                }
1330        } else {
1331                ret = ring->dispatch_execbuffer(ring,
1332                                                exec_start, exec_len,
1333                                                dispatch_flags);
1334                if (ret)
1335                        return ret;
1336        }
1337
1338        trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
1339
1340        i915_gem_execbuffer_move_to_active(vmas, ring);
1341        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1342
1343error:
1344        kfree(cliprects);
1345        return ret;
1346}
1347
 1348/**
 1349 * Find a BSD ring on which to dispatch the corresponding BSD command.
 1350 * The ring ID is returned.
 1351 */
1352static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1353                                  struct drm_file *file)
1354{
1355        struct drm_i915_private *dev_priv = dev->dev_private;
1356        struct drm_i915_file_private *file_priv = file->driver_priv;
1357
 1358        /* Check whether this file_priv has already been assigned a BSD ring */
1359        if (file_priv->bsd_ring)
1360                return file_priv->bsd_ring->id;
1361        else {
 1362                /* If not, use the ping-pong mechanism to select one */
1363                int ring_id;
1364
1365                mutex_lock(&dev->struct_mutex);
1366                if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1367                        ring_id = VCS;
1368                        dev_priv->mm.bsd_ring_dispatch_index = 1;
1369                } else {
1370                        ring_id = VCS2;
1371                        dev_priv->mm.bsd_ring_dispatch_index = 0;
1372                }
1373                file_priv->bsd_ring = &dev_priv->ring[ring_id];
1374                mutex_unlock(&dev->struct_mutex);
1375                return ring_id;
1376        }
1377}
1378
1379static struct drm_i915_gem_object *
1380eb_get_batch(struct eb_vmas *eb)
1381{
1382        struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1383
1384        /*
1385         * SNA is doing fancy tricks with compressing batch buffers, which leads
1386         * to negative relocation deltas. Usually that works out ok since the
1387         * relocate address is still positive, except when the batch is placed
1388         * very low in the GTT. Ensure this doesn't happen.
1389         *
1390         * Note that actual hangs have only been observed on gen7, but for
1391         * paranoia do it everywhere.
1392         */
1393        vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1394
1395        return vma->obj;
1396}
1397
1398static int
1399i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1400                       struct drm_file *file,
1401                       struct drm_i915_gem_execbuffer2 *args,
1402                       struct drm_i915_gem_exec_object2 *exec)
1403{
1404        struct drm_i915_private *dev_priv = dev->dev_private;
1405        struct eb_vmas *eb;
1406        struct drm_i915_gem_object *batch_obj;
1407        struct drm_i915_gem_exec_object2 shadow_exec_entry;
1408        struct intel_engine_cs *ring;
1409        struct intel_context *ctx;
1410        struct i915_address_space *vm;
1411        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1412        u64 exec_start = args->batch_start_offset;
1413        u32 dispatch_flags;
1414        int ret;
1415        bool need_relocs;
1416
1417        if (!i915_gem_check_execbuffer(args))
1418                return -EINVAL;
1419
1420        ret = validate_exec_list(dev, exec, args->buffer_count);
1421        if (ret)
1422                return ret;
1423
1424        dispatch_flags = 0;
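            /*
             * Secure dispatch runs the batch with elevated hardware
             * privileges (the MI_BATCH_NON_SECURE bit is not set), so only
             * the DRM master with CAP_SYS_ADMIN may ask for it.
             */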
1425        if (args->flags & I915_EXEC_SECURE) {
1426                if (!file->is_master || !capable(CAP_SYS_ADMIN))
1427                        return -EPERM;
1428
1429                dispatch_flags |= I915_DISPATCH_SECURE;
1430        }
1431        if (args->flags & I915_EXEC_IS_PINNED)
1432                dispatch_flags |= I915_DISPATCH_PINNED;
1433
1434        if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1435                DRM_DEBUG("execbuf with unknown ring: %d\n",
1436                          (int)(args->flags & I915_EXEC_RING_MASK));
1437                return -EINVAL;
1438        }
1439
1440        if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
1441            ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1442                DRM_DEBUG("execbuf with BSD dispatch flags on a non-BSD ring: %d\n",
1443                          (int)(args->flags));
1444                return -EINVAL;
1445        }
1446
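            /*
             * The ring selector in args->flags is one greater than the
             * driver's engine id (I915_EXEC_DEFAULT is 0 and maps to the
             * render ring), hence the [selector - 1] lookup in the final
             * else branch below.
             */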
1447        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1448                ring = &dev_priv->ring[RCS];
1449        else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1450                if (HAS_BSD2(dev)) {
1451                        int ring_id;
1452
1453                        switch (args->flags & I915_EXEC_BSD_MASK) {
1454                        case I915_EXEC_BSD_DEFAULT:
1455                                ring_id = gen8_dispatch_bsd_ring(dev, file);
1456                                ring = &dev_priv->ring[ring_id];
1457                                break;
1458                        case I915_EXEC_BSD_RING1:
1459                                ring = &dev_priv->ring[VCS];
1460                                break;
1461                        case I915_EXEC_BSD_RING2:
1462                                ring = &dev_priv->ring[VCS2];
1463                                break;
1464                        default:
1465                                DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
1466                                          (int)(args->flags & I915_EXEC_BSD_MASK));
1467                                return -EINVAL;
1468                        }
1469                } else
1470                        ring = &dev_priv->ring[VCS];
1471        } else
1472                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1473
1474        if (!intel_ring_initialized(ring)) {
1475                DRM_DEBUG("execbuf with invalid ring: %d\n",
1476                          (int)(args->flags & I915_EXEC_RING_MASK));
1477                return -EINVAL;
1478        }
1479
1480        if (args->buffer_count < 1) {
1481                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1482                return -EINVAL;
1483        }
1484
1485        intel_runtime_pm_get(dev_priv);
1486
1487        ret = i915_mutex_lock_interruptible(dev);
1488        if (ret)
1489                goto pre_mutex_err;
1490
1491        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1492        if (IS_ERR(ctx)) {
1493                mutex_unlock(&dev->struct_mutex);
1494                ret = PTR_ERR(ctx);
1495                goto pre_mutex_err;
1496        }
1497
1498        i915_gem_context_reference(ctx);
1499
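            /*
             * Objects are bound into the context's private ppgtt when one
             * exists, otherwise into the global GTT.
             */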
1500        if (ctx->ppgtt)
1501                vm = &ctx->ppgtt->base;
1502        else
1503                vm = &dev_priv->gtt.base;
1504
1505        eb = eb_create(args);
1506        if (eb == NULL) {
1507                i915_gem_context_unreference(ctx);
1508                mutex_unlock(&dev->struct_mutex);
1509                ret = -ENOMEM;
1510                goto pre_mutex_err;
1511        }
1512
1513        /* Look up object handles */
1514        ret = eb_lookup_vmas(eb, exec, args, vm, file);
1515        if (ret)
1516                goto err;
1517
1518        /* take note of the batch buffer before we might reorder the lists */
1519        batch_obj = eb_get_batch(eb);
1520
1521        /* Move the objects en-masse into the GTT, evicting if necessary. */
1522        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1523        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1524        if (ret)
1525                goto err;
1526
1527        /* The objects are in their final locations, apply the relocations. */
1528        if (need_relocs)
1529                ret = i915_gem_execbuffer_relocate(eb);
1530        if (ret) {
1531                if (ret == -EFAULT) {
1532                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1533                                                                eb, exec);
1534                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1535                }
1536                if (ret)
1537                        goto err;
1538        }
1539
1540        /* The batch is only read by the GPU; reject self-modifying batches */
1541        if (batch_obj->base.pending_write_domain) {
1542                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1543                ret = -EINVAL;
1544                goto err;
1545        }
1546
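            /*
             * Where the ring needs it, run the command parser over the
             * batch. A fully parsed batch comes back as a kernel-owned
             * shadow copy, and that copy is what actually gets executed.
             */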
1547        if (i915_needs_cmd_parser(ring) && args->batch_len) {
1548                struct drm_i915_gem_object *parsed_batch_obj;
1549
1550                parsed_batch_obj = i915_gem_execbuffer_parse(ring,
1551                                                      &shadow_exec_entry,
1552                                                      eb,
1553                                                      batch_obj,
1554                                                      args->batch_start_offset,
1555                                                      args->batch_len,
1556                                                      file->is_master);
1557                if (IS_ERR(parsed_batch_obj)) {
1558                        ret = PTR_ERR(parsed_batch_obj);
1559                        goto err;
1560                }
1561
1562                /*
1563                 * parsed_batch_obj == batch_obj means batch not fully parsed:
1564                 * Accept, but don't promote to secure.
1565                 */
1566
1567                if (parsed_batch_obj != batch_obj) {
1568                        /*
1569                         * Batch parsed and accepted:
1570                         *
1571                         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1572                         * bit from MI_BATCH_BUFFER_START commands issued in
1573                         * the dispatch_execbuffer implementations. We
1574                         * specifically don't want that set on batches the
1575                         * command parser has accepted.
1576                         */
1577                        dispatch_flags |= I915_DISPATCH_SECURE;
1578                        exec_start = 0;
1579                        batch_obj = parsed_batch_obj;
1580                }
1581        }
1582
1583        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1584
1585        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1586         * batch" bit. Hence we need to pin secure batches into the global gtt.
1587         * hsw should have this fixed, but bdw mucks it up again. */
1588        if (dispatch_flags & I915_DISPATCH_SECURE) {
1589                /*
1590                 * So on first glance it looks freaky that we pin the batch here
1591                 * outside of the reservation loop. But:
1592                 * - The batch is already pinned into the relevant ppgtt, so we
1593                 *   already have the backing storage fully allocated.
1594                 * - No other BO uses the global gtt (well contexts, but meh),
1595                 *   so we don't really have issues with multiple objects not
1596                 *   fitting due to fragmentation.
1597                 * So this is actually safe.
1598                 */
1599                ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1600                if (ret)
1601                        goto err;
1602
1603                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1604        } else
1605                exec_start += i915_gem_obj_offset(batch_obj, vm);
1606
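            /*
             * Hand the batch off to the platform submission backend: either
             * the legacy ringbuffer path or, on newer hardware, execlists.
             */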
1607        ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
1608                                          &eb->vmas, batch_obj, exec_start,
1609                                          dispatch_flags);
1610
1611        /*
1612         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1613         * batch vma for correctness. To make this less ugly and fragile, it
1614         * needs to be adjusted to also track the ggtt batch vma properly as
1615         * active.
1616         */
1617        if (dispatch_flags & I915_DISPATCH_SECURE)
1618                i915_gem_object_ggtt_unpin(batch_obj);
1619err:
1620        /* the request owns the ref now */
1621        i915_gem_context_unreference(ctx);
1622        eb_destroy(eb);
1623
1624        mutex_unlock(&dev->struct_mutex);
1625
1626pre_mutex_err:
1627        /* intel_gpu_busy should also get a ref, so it will free when the device
1628         * is really idle. */
1629        intel_runtime_pm_put(dev_priv);
1630        return ret;
1631}
1632
1633/*
1634 * Legacy execbuffer just creates an exec2 list from the original exec object
1635 * list array and passes it to the real function.
1636 */
1637int
1638i915_gem_execbuffer(struct drm_device *dev, void *data,
1639                    struct drm_file *file)
1640{
1641        struct drm_i915_gem_execbuffer *args = data;
1642        struct drm_i915_gem_execbuffer2 exec2;
1643        struct drm_i915_gem_exec_object *exec_list = NULL;
1644        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1645        int ret, i;
1646
1647        if (args->buffer_count < 1) {
1648                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1649                return -EINVAL;
1650        }
1651
1652        /* Copy in the exec list from userland */
1653        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1654        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1655        if (exec_list == NULL || exec2_list == NULL) {
1656                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1657                          args->buffer_count);
1658                drm_free_large(exec_list);
1659                drm_free_large(exec2_list);
1660                return -ENOMEM;
1661        }
1662        ret = copy_from_user(exec_list,
1663                             to_user_ptr(args->buffers_ptr),
1664                             sizeof(*exec_list) * args->buffer_count);
1665        if (ret != 0) {
1666                DRM_DEBUG("copy %d exec entries failed %d\n",
1667                          args->buffer_count, ret);
1668                drm_free_large(exec_list);
1669                drm_free_large(exec2_list);
1670                return -EFAULT;
1671        }
1672
1673        for (i = 0; i < args->buffer_count; i++) {
1674                exec2_list[i].handle = exec_list[i].handle;
1675                exec2_list[i].relocation_count = exec_list[i].relocation_count;
1676                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1677                exec2_list[i].alignment = exec_list[i].alignment;
1678                exec2_list[i].offset = exec_list[i].offset;
1679                if (INTEL_INFO(dev)->gen < 4)
1680                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1681                else
1682                        exec2_list[i].flags = 0;
1683        }
1684
1685        exec2.buffers_ptr = args->buffers_ptr;
1686        exec2.buffer_count = args->buffer_count;
1687        exec2.batch_start_offset = args->batch_start_offset;
1688        exec2.batch_len = args->batch_len;
1689        exec2.DR1 = args->DR1;
1690        exec2.DR4 = args->DR4;
1691        exec2.num_cliprects = args->num_cliprects;
1692        exec2.cliprects_ptr = args->cliprects_ptr;
1693        exec2.flags = I915_EXEC_RENDER;
1694        i915_execbuffer2_set_context_id(exec2, 0);
1695
1696        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1697        if (!ret) {
1698                struct drm_i915_gem_exec_object __user *user_exec_list =
1699                        to_user_ptr(args->buffers_ptr);
1700
1701                /* Copy the new buffer offsets back to the user's exec list. */
1702                for (i = 0; i < args->buffer_count; i++) {
1703                        ret = __copy_to_user(&user_exec_list[i].offset,
1704                                             &exec2_list[i].offset,
1705                                             sizeof(user_exec_list[i].offset));
1706                        if (ret) {
1707                                ret = -EFAULT;
1708                                DRM_DEBUG("failed to copy %d exec entries "
1709                                          "back to user (%d)\n",
1710                                          args->buffer_count, ret);
1711                                break;
1712                        }
1713                }
1714        }
1715
1716        drm_free_large(exec_list);
1717        drm_free_large(exec2_list);
1718        return ret;
1719}
1720
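    /*
     * Illustrative sketch only (not part of this file): a minimal userspace
     * caller of the execbuffer2 ioctl fills in the object list (the batch
     * buffer must be its last entry) and the batch description, then issues
     * DRM_IOCTL_I915_GEM_EXECBUFFER2, e.g.
     *
     *         struct drm_i915_gem_execbuffer2 execbuf = {};
     *         execbuf.buffers_ptr = (__u64)(uintptr_t)exec2_list;
     *         execbuf.buffer_count = count;
     *         execbuf.batch_start_offset = 0;
     *         execbuf.batch_len = batch_bytes;
     *         execbuf.flags = I915_EXEC_RENDER;
     *         drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
     *
     * "exec2_list", "count", "batch_bytes" and "fd" are assumed to be set up
     * by the caller; drmIoctl() is the libdrm wrapper around ioctl().
     */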
1721int
1722i915_gem_execbuffer2(struct drm_device *dev, void *data,
1723                     struct drm_file *file)
1724{
1725        struct drm_i915_gem_execbuffer2 *args = data;
1726        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1727        int ret;
1728
1729        if (args->buffer_count < 1 ||
1730            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1731                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1732                return -EINVAL;
1733        }
1734
1735        if (args->rsvd2 != 0) {
1736                DRM_DEBUG("dirty rsvd2 field\n");
1737                return -EINVAL;
1738        }
1739
1740        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1741                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1742        if (exec2_list == NULL)
1743                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1744                                           args->buffer_count);
1745        if (exec2_list == NULL) {
1746                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1747                          args->buffer_count);
1748                return -ENOMEM;
1749        }
1750        ret = copy_from_user(exec2_list,
1751                             to_user_ptr(args->buffers_ptr),
1752                             sizeof(*exec2_list) * args->buffer_count);
1753        if (ret != 0) {
1754                DRM_DEBUG("copy %d exec entries failed %d\n",
1755                          args->buffer_count, ret);
1756                drm_free_large(exec2_list);
1757                return -EFAULT;
1758        }
1759
1760        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1761        if (!ret) {
1762                /* Copy the new buffer offsets back to the user's exec list. */
1763                struct drm_i915_gem_exec_object2 __user *user_exec_list =
1764                                   to_user_ptr(args->buffers_ptr);
1765                int i;
1766
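                    /*
                     * Only the offset field can have been changed by the
                     * kernel, so copy just that field back per entry rather
                     * than rewriting the whole user array.
                     */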
1767                for (i = 0; i < args->buffer_count; i++) {
1768                        ret = __copy_to_user(&user_exec_list[i].offset,
1769                                             &exec2_list[i].offset,
1770                                             sizeof(user_exec_list[i].offset));
1771                        if (ret) {
1772                                ret = -EFAULT;
1773                                DRM_DEBUG("failed to copy %d exec entries "
1774                                          "back to user\n",
1775                                          args->buffer_count);
1776                                break;
1777                        }
1778                }
1779        }
1780
1781        drm_free_large(exec2_list);
1782        return ret;
1783}
1784