linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c
   1/*
   2 * Copyright © 2008,2010 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Chris Wilson <chris@chris-wilson.co.uk>
  26 *
  27 */
  28
  29#include <drm/drmP.h>
  30#include <drm/i915_drm.h>
  31#include "i915_drv.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34#include <linux/dma_remapping.h>
  35#include <linux/uaccess.h>
  36
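/*
 * Flags used internally by the execbuffer code to track per-object state.
 * They live in the otherwise unused high bits of
 * drm_i915_gem_exec_object2.flags, alongside the EXEC_OBJECT_* flags that
 * userspace may set.  BATCH_OFFSET_BIAS keeps biased objects (in practice
 * the batch buffer, see eb_get_batch()) out of the lowest 256KiB of the GTT.
 */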
  37#define  __EXEC_OBJECT_HAS_PIN (1<<31)
  38#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
  39#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
  40#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
  41
  42#define BATCH_OFFSET_BIAS (256*1024)
  43
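/*
 * Per-execbuf table mapping user handles to the vmas used for this
 * submission.  "and" doubles as the mode switch: a negative value means the
 * flat lut[] is indexed directly by buffer position (I915_EXEC_HANDLE_LUT),
 * while a non-negative value is the mask for the hash bucket array keyed by
 * GEM handle (see eb_get_vma()).
 */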
  44struct eb_vmas {
  45        struct list_head vmas;
  46        int and;
  47        union {
  48                struct i915_vma *lut[0];
  49                struct hlist_head buckets[0];
  50        };
  51};
  52
  53static struct eb_vmas *
  54eb_create(struct drm_i915_gem_execbuffer2 *args)
  55{
  56        struct eb_vmas *eb = NULL;
  57
  58        if (args->flags & I915_EXEC_HANDLE_LUT) {
  59                unsigned size = args->buffer_count;
  60                size *= sizeof(struct i915_vma *);
  61                size += sizeof(struct eb_vmas);
  62                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  63        }
  64
  65        if (eb == NULL) {
  66                unsigned size = args->buffer_count;
  67                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  68                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
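                /*
                 * Example, assuming 4KiB pages and 8-byte hlist_heads: count
                 * starts at 4096 / 8 / 2 = 256 buckets and is halved until it
                 * is no more than twice the number of buffers, so a 20-buffer
                 * execbuf ends up with a 32-bucket (256 byte) hash table.
                 */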
  69                while (count > 2*size)
  70                        count >>= 1;
  71                eb = kzalloc(count*sizeof(struct hlist_head) +
  72                             sizeof(struct eb_vmas),
  73                             GFP_TEMPORARY);
  74                if (eb == NULL)
  75                        return eb;
  76
  77                eb->and = count - 1;
  78        } else
  79                eb->and = -args->buffer_count;
  80
  81        INIT_LIST_HEAD(&eb->vmas);
  82        return eb;
  83}
  84
  85static void
  86eb_reset(struct eb_vmas *eb)
  87{
  88        if (eb->and >= 0)
  89                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  90}
  91
  92static int
  93eb_lookup_vmas(struct eb_vmas *eb,
  94               struct drm_i915_gem_exec_object2 *exec,
  95               const struct drm_i915_gem_execbuffer2 *args,
  96               struct i915_address_space *vm,
  97               struct drm_file *file)
  98{
  99        struct drm_i915_gem_object *obj;
 100        struct list_head objects;
 101        int i, ret;
 102
 103        INIT_LIST_HEAD(&objects);
 104        spin_lock(&file->table_lock);
  105        /* Grab a reference to the object and release the lock so we can
  106         * look up or create the VMA without using GFP_ATOMIC. */
 107        for (i = 0; i < args->buffer_count; i++) {
 108                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 109                if (obj == NULL) {
 110                        spin_unlock(&file->table_lock);
 111                        DRM_DEBUG("Invalid object handle %d at index %d\n",
 112                                   exec[i].handle, i);
 113                        ret = -ENOENT;
 114                        goto err;
 115                }
 116
 117                if (!list_empty(&obj->obj_exec_link)) {
 118                        spin_unlock(&file->table_lock);
 119                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 120                                   obj, exec[i].handle, i);
 121                        ret = -EINVAL;
 122                        goto err;
 123                }
 124
 125                drm_gem_object_reference(&obj->base);
 126                list_add_tail(&obj->obj_exec_link, &objects);
 127        }
 128        spin_unlock(&file->table_lock);
 129
 130        i = 0;
 131        while (!list_empty(&objects)) {
 132                struct i915_vma *vma;
 133
 134                obj = list_first_entry(&objects,
 135                                       struct drm_i915_gem_object,
 136                                       obj_exec_link);
 137
 138                /*
 139                 * NOTE: We can leak any vmas created here when something fails
 140                 * later on. But that's no issue since vma_unbind can deal with
 141                 * vmas which are not actually bound. And since only
 142                 * lookup_or_create exists as an interface to get at the vma
  143                 * from the (obj, vm) pair, we don't run the risk of creating
 144                 * duplicated vmas for the same vm.
 145                 */
 146                vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 147                if (IS_ERR(vma)) {
 148                        DRM_DEBUG("Failed to lookup VMA\n");
 149                        ret = PTR_ERR(vma);
 150                        goto err;
 151                }
 152
 153                /* Transfer ownership from the objects list to the vmas list. */
 154                list_add_tail(&vma->exec_list, &eb->vmas);
 155                list_del_init(&obj->obj_exec_link);
 156
 157                vma->exec_entry = &exec[i];
 158                if (eb->and < 0) {
 159                        eb->lut[i] = vma;
 160                } else {
 161                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
 162                        vma->exec_handle = handle;
 163                        hlist_add_head(&vma->exec_node,
 164                                       &eb->buckets[handle & eb->and]);
 165                }
 166                ++i;
 167        }
 168
 169        return 0;
 170
 171
 172err:
 173        while (!list_empty(&objects)) {
 174                obj = list_first_entry(&objects,
 175                                       struct drm_i915_gem_object,
 176                                       obj_exec_link);
 177                list_del_init(&obj->obj_exec_link);
 178                drm_gem_object_unreference(&obj->base);
 179        }
 180        /*
  181         * Objects already transferred to the vmas list will be unreferenced by
 182         * eb_destroy.
 183         */
 184
 185        return ret;
 186}
 187
 188static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 189{
 190        if (eb->and < 0) {
 191                if (handle >= -eb->and)
 192                        return NULL;
 193                return eb->lut[handle];
 194        } else {
 195                struct hlist_head *head;
 196                struct hlist_node *node;
 197
 198                head = &eb->buckets[handle & eb->and];
 199                hlist_for_each(node, head) {
 200                        struct i915_vma *vma;
 201
 202                        vma = hlist_entry(node, struct i915_vma, exec_node);
 203                        if (vma->exec_handle == handle)
 204                                return vma;
 205                }
 206                return NULL;
 207        }
 208}
 209
 210static void
 211i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 212{
 213        struct drm_i915_gem_exec_object2 *entry;
 214        struct drm_i915_gem_object *obj = vma->obj;
 215
 216        if (!drm_mm_node_allocated(&vma->node))
 217                return;
 218
 219        entry = vma->exec_entry;
 220
 221        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 222                i915_gem_object_unpin_fence(obj);
 223
 224        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 225                vma->pin_count--;
 226
 227        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 228}
 229
 230static void eb_destroy(struct eb_vmas *eb)
 231{
 232        while (!list_empty(&eb->vmas)) {
 233                struct i915_vma *vma;
 234
 235                vma = list_first_entry(&eb->vmas,
 236                                       struct i915_vma,
 237                                       exec_list);
 238                list_del_init(&vma->exec_list);
 239                i915_gem_execbuffer_unreserve_vma(vma);
 240                drm_gem_object_unreference(&vma->obj->base);
 241        }
 242        kfree(eb);
 243}
 244
 245static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 246{
 247        return (HAS_LLC(obj->base.dev) ||
 248                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 249                obj->cache_level != I915_CACHE_NONE);
 250}
 251
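/*
 * The actual relocation write is done by one of three helpers:
 * relocate_entry_cpu() kmaps the backing page and writes through the CPU
 * cache, relocate_entry_gtt() writes through the mappable GTT aperture, and
 * relocate_entry_clflush() writes through the CPU with explicit clflushes.
 * On gen8+ the value is 64 bits wide, so each helper writes a second upper
 * dword which may fall on the following page.
 */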
 252static int
 253relocate_entry_cpu(struct drm_i915_gem_object *obj,
 254                   struct drm_i915_gem_relocation_entry *reloc,
 255                   uint64_t target_offset)
 256{
 257        struct drm_device *dev = obj->base.dev;
 258        uint32_t page_offset = offset_in_page(reloc->offset);
 259        uint64_t delta = reloc->delta + target_offset;
 260        char *vaddr;
 261        int ret;
 262
 263        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 264        if (ret)
 265                return ret;
 266
 267        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 268                                reloc->offset >> PAGE_SHIFT));
 269        *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 270
 271        if (INTEL_INFO(dev)->gen >= 8) {
 272                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 273
 274                if (page_offset == 0) {
 275                        kunmap_atomic(vaddr);
 276                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 277                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 278                }
 279
 280                *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
 281        }
 282
 283        kunmap_atomic(vaddr);
 284
 285        return 0;
 286}
 287
 288static int
 289relocate_entry_gtt(struct drm_i915_gem_object *obj,
 290                   struct drm_i915_gem_relocation_entry *reloc,
 291                   uint64_t target_offset)
 292{
 293        struct drm_device *dev = obj->base.dev;
 294        struct drm_i915_private *dev_priv = dev->dev_private;
 295        uint64_t delta = reloc->delta + target_offset;
 296        uint64_t offset;
 297        void __iomem *reloc_page;
 298        int ret;
 299
 300        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 301        if (ret)
 302                return ret;
 303
 304        ret = i915_gem_object_put_fence(obj);
 305        if (ret)
 306                return ret;
 307
 308        /* Map the page containing the relocation we're going to perform.  */
 309        offset = i915_gem_obj_ggtt_offset(obj);
 310        offset += reloc->offset;
 311        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 312                                              offset & PAGE_MASK);
 313        iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 314
 315        if (INTEL_INFO(dev)->gen >= 8) {
 316                offset += sizeof(uint32_t);
 317
 318                if (offset_in_page(offset) == 0) {
 319                        io_mapping_unmap_atomic(reloc_page);
 320                        reloc_page =
 321                                io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 322                                                         offset);
 323                }
 324
 325                iowrite32(upper_32_bits(delta),
 326                          reloc_page + offset_in_page(offset));
 327        }
 328
 329        io_mapping_unmap_atomic(reloc_page);
 330
 331        return 0;
 332}
 333
 334static void
 335clflush_write32(void *addr, uint32_t value)
 336{
 337        /* This is not a fast path, so KISS. */
 338        drm_clflush_virt_range(addr, sizeof(uint32_t));
 339        *(uint32_t *)addr = value;
 340        drm_clflush_virt_range(addr, sizeof(uint32_t));
 341}
 342
 343static int
 344relocate_entry_clflush(struct drm_i915_gem_object *obj,
 345                       struct drm_i915_gem_relocation_entry *reloc,
 346                       uint64_t target_offset)
 347{
 348        struct drm_device *dev = obj->base.dev;
 349        uint32_t page_offset = offset_in_page(reloc->offset);
 350        uint64_t delta = (int)reloc->delta + target_offset;
 351        char *vaddr;
 352        int ret;
 353
 354        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 355        if (ret)
 356                return ret;
 357
 358        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 359                                reloc->offset >> PAGE_SHIFT));
 360        clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 361
 362        if (INTEL_INFO(dev)->gen >= 8) {
 363                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 364
 365                if (page_offset == 0) {
 366                        kunmap_atomic(vaddr);
 367                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 368                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 369                }
 370
 371                clflush_write32(vaddr + page_offset, upper_32_bits(delta));
 372        }
 373
 374        kunmap_atomic(vaddr);
 375
 376        return 0;
 377}
 378
 379static int
 380i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 381                                   struct eb_vmas *eb,
 382                                   struct drm_i915_gem_relocation_entry *reloc)
 383{
 384        struct drm_device *dev = obj->base.dev;
 385        struct drm_gem_object *target_obj;
 386        struct drm_i915_gem_object *target_i915_obj;
 387        struct i915_vma *target_vma;
 388        uint64_t target_offset;
 389        int ret;
 390
  391        /* we already hold a reference to all valid objects */
 392        target_vma = eb_get_vma(eb, reloc->target_handle);
 393        if (unlikely(target_vma == NULL))
 394                return -ENOENT;
 395        target_i915_obj = target_vma->obj;
 396        target_obj = &target_vma->obj->base;
 397
 398        target_offset = target_vma->node.start;
 399
 400        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 401         * pipe_control writes because the gpu doesn't properly redirect them
  402         * through the ppgtt for non-secure batchbuffers. */
 403        if (unlikely(IS_GEN6(dev) &&
 404            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 405                ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 406                                    PIN_GLOBAL);
 407                if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
 408                        return ret;
 409        }
 410
 411        /* Validate that the target is in a valid r/w GPU domain */
 412        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 413                DRM_DEBUG("reloc with multiple write domains: "
 414                          "obj %p target %d offset %d "
 415                          "read %08x write %08x",
 416                          obj, reloc->target_handle,
 417                          (int) reloc->offset,
 418                          reloc->read_domains,
 419                          reloc->write_domain);
 420                return -EINVAL;
 421        }
 422        if (unlikely((reloc->write_domain | reloc->read_domains)
 423                     & ~I915_GEM_GPU_DOMAINS)) {
 424                DRM_DEBUG("reloc with read/write non-GPU domains: "
 425                          "obj %p target %d offset %d "
 426                          "read %08x write %08x",
 427                          obj, reloc->target_handle,
 428                          (int) reloc->offset,
 429                          reloc->read_domains,
 430                          reloc->write_domain);
 431                return -EINVAL;
 432        }
 433
 434        target_obj->pending_read_domains |= reloc->read_domains;
 435        target_obj->pending_write_domain |= reloc->write_domain;
 436
 437        /* If the relocation already has the right value in it, no
 438         * more work needs to be done.
 439         */
 440        if (target_offset == reloc->presumed_offset)
 441                return 0;
 442
 443        /* Check that the relocation address is valid... */
 444        if (unlikely(reloc->offset >
 445                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 446                DRM_DEBUG("Relocation beyond object bounds: "
 447                          "obj %p target %d offset %d size %d.\n",
 448                          obj, reloc->target_handle,
 449                          (int) reloc->offset,
 450                          (int) obj->base.size);
 451                return -EINVAL;
 452        }
 453        if (unlikely(reloc->offset & 3)) {
 454                DRM_DEBUG("Relocation not 4-byte aligned: "
 455                          "obj %p target %d offset %d.\n",
 456                          obj, reloc->target_handle,
 457                          (int) reloc->offset);
 458                return -EINVAL;
 459        }
 460
 461        /* We can't wait for rendering with pagefaults disabled */
 462        if (obj->active && pagefault_disabled())
 463                return -EFAULT;
 464
 465        if (use_cpu_reloc(obj))
 466                ret = relocate_entry_cpu(obj, reloc, target_offset);
 467        else if (obj->map_and_fenceable)
 468                ret = relocate_entry_gtt(obj, reloc, target_offset);
 469        else if (cpu_has_clflush)
 470                ret = relocate_entry_clflush(obj, reloc, target_offset);
 471        else {
 472                WARN_ONCE(1, "Impossible case in relocation handling\n");
 473                ret = -ENODEV;
 474        }
 475
 476        if (ret)
 477                return ret;
 478
 479        /* and update the user's relocation entry */
 480        reloc->presumed_offset = target_offset;
 481
 482        return 0;
 483}
 484
 485static int
 486i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 487                                 struct eb_vmas *eb)
 488{
 489#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 490        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 491        struct drm_i915_gem_relocation_entry __user *user_relocs;
 492        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 493        int remain, ret;
 494
 495        user_relocs = to_user_ptr(entry->relocs_ptr);
 496
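        /*
         * Copy the relocations from userspace in small chunks onto the stack.
         * The __copy_*_inatomic variants are needed because the caller has
         * disabled pagefaults (see i915_gem_execbuffer_relocate()); if a
         * fault would be required we return -EFAULT and execbuf falls back to
         * i915_gem_execbuffer_relocate_slow().
         */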
 497        remain = entry->relocation_count;
 498        while (remain) {
 499                struct drm_i915_gem_relocation_entry *r = stack_reloc;
 500                int count = remain;
 501                if (count > ARRAY_SIZE(stack_reloc))
 502                        count = ARRAY_SIZE(stack_reloc);
 503                remain -= count;
 504
 505                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
 506                        return -EFAULT;
 507
 508                do {
 509                        u64 offset = r->presumed_offset;
 510
 511                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
 512                        if (ret)
 513                                return ret;
 514
 515                        if (r->presumed_offset != offset &&
 516                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
 517                                                    &r->presumed_offset,
 518                                                    sizeof(r->presumed_offset))) {
 519                                return -EFAULT;
 520                        }
 521
 522                        user_relocs++;
 523                        r++;
 524                } while (--count);
 525        }
 526
 527        return 0;
 528#undef N_RELOC
 529}
 530
 531static int
 532i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 533                                      struct eb_vmas *eb,
 534                                      struct drm_i915_gem_relocation_entry *relocs)
 535{
 536        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 537        int i, ret;
 538
 539        for (i = 0; i < entry->relocation_count; i++) {
 540                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
 541                if (ret)
 542                        return ret;
 543        }
 544
 545        return 0;
 546}
 547
 548static int
 549i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 550{
 551        struct i915_vma *vma;
 552        int ret = 0;
 553
 554        /* This is the fast path and we cannot handle a pagefault whilst
 555         * holding the struct mutex lest the user pass in the relocations
  556         * contained within an mmapped bo. For in such a case the page
 557         * fault handler would call i915_gem_fault() and we would try to
 558         * acquire the struct mutex again. Obviously this is bad and so
 559         * lockdep complains vehemently.
 560         */
 561        pagefault_disable();
 562        list_for_each_entry(vma, &eb->vmas, exec_list) {
 563                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 564                if (ret)
 565                        break;
 566        }
 567        pagefault_enable();
 568
 569        return ret;
 570}
 571
 572static bool only_mappable_for_reloc(unsigned int flags)
 573{
 574        return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
 575                __EXEC_OBJECT_NEEDS_MAP;
 576}
 577
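/*
 * Pin one vma for this execbuf, deriving the pin flags from its exec entry:
 * PIN_GLOBAL for EXEC_OBJECT_NEEDS_GTT, PIN_MAPPABLE when relocations are
 * expected to go through the GTT aperture, an offset bias for batch buffers
 * and PIN_ZONE_4G for objects that do not support 48-bit addressing.  If a
 * mappable pin does not fit and the mapping was only wanted for relocations,
 * retry without PIN_MAPPABLE.
 */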
 578static int
 579i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 580                                struct intel_engine_cs *ring,
 581                                bool *need_reloc)
 582{
 583        struct drm_i915_gem_object *obj = vma->obj;
 584        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 585        uint64_t flags;
 586        int ret;
 587
 588        flags = PIN_USER;
 589        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 590                flags |= PIN_GLOBAL;
 591
 592        if (!drm_mm_node_allocated(&vma->node)) {
 593                /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
 594                 * limit address to the first 4GBs for unflagged objects.
 595                 */
 596                if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
 597                        flags |= PIN_ZONE_4G;
 598                if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 599                        flags |= PIN_GLOBAL | PIN_MAPPABLE;
 600                if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
 601                        flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 602                if ((flags & PIN_MAPPABLE) == 0)
 603                        flags |= PIN_HIGH;
 604        }
 605
 606        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 607        if ((ret == -ENOSPC  || ret == -E2BIG) &&
 608            only_mappable_for_reloc(entry->flags))
 609                ret = i915_gem_object_pin(obj, vma->vm,
 610                                          entry->alignment,
 611                                          flags & ~PIN_MAPPABLE);
 612        if (ret)
 613                return ret;
 614
 615        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 616
 617        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 618                ret = i915_gem_object_get_fence(obj);
 619                if (ret)
 620                        return ret;
 621
 622                if (i915_gem_object_pin_fence(obj))
 623                        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 624        }
 625
 626        if (entry->offset != vma->node.start) {
 627                entry->offset = vma->node.start;
 628                *need_reloc = true;
 629        }
 630
 631        if (entry->flags & EXEC_OBJECT_WRITE) {
 632                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
 633                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 634        }
 635
 636        return 0;
 637}
 638
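/*
 * Returns true when relocations for this vma are expected to be written
 * through the GTT aperture (see relocate_entry_gtt()), in which case we
 * prefer to bind the object into the mappable part of the global GTT.  This
 * is only a preference: only_mappable_for_reloc() lets it be dropped if the
 * object does not otherwise fit.
 */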
 639static bool
 640need_reloc_mappable(struct i915_vma *vma)
 641{
 642        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 643
 644        if (entry->relocation_count == 0)
 645                return false;
 646
 647        if (!i915_is_ggtt(vma->vm))
 648                return false;
 649
 650        /* See also use_cpu_reloc() */
 651        if (HAS_LLC(vma->obj->base.dev))
 652                return false;
 653
 654        if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 655                return false;
 656
 657        return true;
 658}
 659
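/*
 * Returns true when an already bound vma violates the constraints of the
 * current execbuf (alignment, batch offset bias, mappability, 4GiB zone) and
 * therefore has to be unbound and rebound by i915_gem_execbuffer_reserve().
 */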
 660static bool
 661eb_vma_misplaced(struct i915_vma *vma)
 662{
 663        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 664        struct drm_i915_gem_object *obj = vma->obj;
 665
 666        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 667               !i915_is_ggtt(vma->vm));
 668
 669        if (entry->alignment &&
 670            vma->node.start & (entry->alignment - 1))
 671                return true;
 672
 673        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
 674            vma->node.start < BATCH_OFFSET_BIAS)
 675                return true;
 676
 677        /* avoid costly ping-pong once a batch bo ended up non-mappable */
 678        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
 679                return !only_mappable_for_reloc(entry->flags);
 680
 681        if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
 682            (vma->node.start + vma->node.size - 1) >> 32)
 683                return true;
 684
 685        return false;
 686}
 687
 688static int
 689i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 690                            struct list_head *vmas,
 691                            struct intel_context *ctx,
 692                            bool *need_relocs)
 693{
 694        struct drm_i915_gem_object *obj;
 695        struct i915_vma *vma;
 696        struct i915_address_space *vm;
 697        struct list_head ordered_vmas;
 698        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 699        int retry;
 700
 701        i915_gem_retire_requests_ring(ring);
 702
 703        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 704
 705        INIT_LIST_HEAD(&ordered_vmas);
 706        while (!list_empty(vmas)) {
 707                struct drm_i915_gem_exec_object2 *entry;
 708                bool need_fence, need_mappable;
 709
 710                vma = list_first_entry(vmas, struct i915_vma, exec_list);
 711                obj = vma->obj;
 712                entry = vma->exec_entry;
 713
 714                if (ctx->flags & CONTEXT_NO_ZEROMAP)
 715                        entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 716
 717                if (!has_fenced_gpu_access)
 718                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 719                need_fence =
 720                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 721                        obj->tiling_mode != I915_TILING_NONE;
 722                need_mappable = need_fence || need_reloc_mappable(vma);
 723
 724                if (need_mappable) {
 725                        entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 726                        list_move(&vma->exec_list, &ordered_vmas);
 727                } else
 728                        list_move_tail(&vma->exec_list, &ordered_vmas);
 729
 730                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 731                obj->base.pending_write_domain = 0;
 732        }
 733        list_splice(&ordered_vmas, vmas);
 734
 735        /* Attempt to pin all of the buffers into the GTT.
 736         * This is done in 3 phases:
 737         *
 738         * 1a. Unbind all objects that do not match the GTT constraints for
 739         *     the execbuffer (fenceable, mappable, alignment etc).
 740         * 1b. Increment pin count for already bound objects.
 741         * 2.  Bind new objects.
 742         * 3.  Decrement pin count.
 743         *
  744         * This avoids unnecessary unbinding of later objects in order to make
 745         * room for the earlier objects *unless* we need to defragment.
 746         */
 747        retry = 0;
 748        do {
 749                int ret = 0;
 750
 751                /* Unbind any ill-fitting objects or pin. */
 752                list_for_each_entry(vma, vmas, exec_list) {
 753                        if (!drm_mm_node_allocated(&vma->node))
 754                                continue;
 755
 756                        if (eb_vma_misplaced(vma))
 757                                ret = i915_vma_unbind(vma);
 758                        else
 759                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 760                        if (ret)
 761                                goto err;
 762                }
 763
 764                /* Bind fresh objects */
 765                list_for_each_entry(vma, vmas, exec_list) {
 766                        if (drm_mm_node_allocated(&vma->node))
 767                                continue;
 768
 769                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 770                        if (ret)
 771                                goto err;
 772                }
 773
 774err:
 775                if (ret != -ENOSPC || retry++)
 776                        return ret;
 777
 778                /* Decrement pin count for bound objects */
 779                list_for_each_entry(vma, vmas, exec_list)
 780                        i915_gem_execbuffer_unreserve_vma(vma);
 781
 782                ret = i915_gem_evict_vm(vm, true);
 783                if (ret)
 784                        return ret;
 785        } while (1);
 786}
 787
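/*
 * Slow path for relocation processing: drop every reference taken so far
 * along with struct_mutex, copy all relocation entries from userspace with
 * pagefaults enabled, invalidate the presumed offsets stored in userspace,
 * then retake the lock, look up and reserve the objects again and apply the
 * relocations from the kernel copy.
 */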
 788static int
 789i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 790                                  struct drm_i915_gem_execbuffer2 *args,
 791                                  struct drm_file *file,
 792                                  struct intel_engine_cs *ring,
 793                                  struct eb_vmas *eb,
 794                                  struct drm_i915_gem_exec_object2 *exec,
 795                                  struct intel_context *ctx)
 796{
 797        struct drm_i915_gem_relocation_entry *reloc;
 798        struct i915_address_space *vm;
 799        struct i915_vma *vma;
 800        bool need_relocs;
 801        int *reloc_offset;
 802        int i, total, ret;
 803        unsigned count = args->buffer_count;
 804
 805        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 806
 807        /* We may process another execbuffer during the unlock... */
 808        while (!list_empty(&eb->vmas)) {
 809                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 810                list_del_init(&vma->exec_list);
 811                i915_gem_execbuffer_unreserve_vma(vma);
 812                drm_gem_object_unreference(&vma->obj->base);
 813        }
 814
 815        mutex_unlock(&dev->struct_mutex);
 816
 817        total = 0;
 818        for (i = 0; i < count; i++)
 819                total += exec[i].relocation_count;
 820
 821        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 822        reloc = drm_malloc_ab(total, sizeof(*reloc));
 823        if (reloc == NULL || reloc_offset == NULL) {
 824                drm_free_large(reloc);
 825                drm_free_large(reloc_offset);
 826                mutex_lock(&dev->struct_mutex);
 827                return -ENOMEM;
 828        }
 829
 830        total = 0;
 831        for (i = 0; i < count; i++) {
 832                struct drm_i915_gem_relocation_entry __user *user_relocs;
 833                u64 invalid_offset = (u64)-1;
 834                int j;
 835
 836                user_relocs = to_user_ptr(exec[i].relocs_ptr);
 837
 838                if (copy_from_user(reloc+total, user_relocs,
 839                                   exec[i].relocation_count * sizeof(*reloc))) {
 840                        ret = -EFAULT;
 841                        mutex_lock(&dev->struct_mutex);
 842                        goto err;
 843                }
 844
 845                /* As we do not update the known relocation offsets after
 846                 * relocating (due to the complexities in lock handling),
 847                 * we need to mark them as invalid now so that we force the
 848                 * relocation processing next time. Just in case the target
 849                 * object is evicted and then rebound into its old
 850                 * presumed_offset before the next execbuffer - if that
 851                 * happened we would make the mistake of assuming that the
 852                 * relocations were valid.
 853                 */
 854                for (j = 0; j < exec[i].relocation_count; j++) {
 855                        if (__copy_to_user(&user_relocs[j].presumed_offset,
 856                                           &invalid_offset,
 857                                           sizeof(invalid_offset))) {
 858                                ret = -EFAULT;
 859                                mutex_lock(&dev->struct_mutex);
 860                                goto err;
 861                        }
 862                }
 863
 864                reloc_offset[i] = total;
 865                total += exec[i].relocation_count;
 866        }
 867
 868        ret = i915_mutex_lock_interruptible(dev);
 869        if (ret) {
 870                mutex_lock(&dev->struct_mutex);
 871                goto err;
 872        }
 873
 874        /* reacquire the objects */
 875        eb_reset(eb);
 876        ret = eb_lookup_vmas(eb, exec, args, vm, file);
 877        if (ret)
 878                goto err;
 879
 880        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
 881        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 882        if (ret)
 883                goto err;
 884
 885        list_for_each_entry(vma, &eb->vmas, exec_list) {
 886                int offset = vma->exec_entry - exec;
 887                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
 888                                                            reloc + reloc_offset[offset]);
 889                if (ret)
 890                        goto err;
 891        }
 892
  893        /* Leave the user relocations as they are; this is the painfully slow path,
 894         * and we want to avoid the complication of dropping the lock whilst
 895         * having buffers reserved in the aperture and so causing spurious
 896         * ENOSPC for random operations.
 897         */
 898
 899err:
 900        drm_free_large(reloc);
 901        drm_free_large(reloc_offset);
 902        return ret;
 903}
 904
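/*
 * Prepare every object in the execbuf for use by the GPU: synchronise
 * against outstanding work on other rings, clflush objects that are still
 * dirty in the CPU domain, and invalidate the ring's caches so no stale data
 * from a previous batch is used.
 */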
 905static int
 906i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 907                                struct list_head *vmas)
 908{
 909        const unsigned other_rings = ~intel_ring_flag(req->ring);
 910        struct i915_vma *vma;
 911        uint32_t flush_domains = 0;
 912        bool flush_chipset = false;
 913        int ret;
 914
 915        list_for_each_entry(vma, vmas, exec_list) {
 916                struct drm_i915_gem_object *obj = vma->obj;
 917
 918                if (obj->active & other_rings) {
 919                        ret = i915_gem_object_sync(obj, req->ring, &req);
 920                        if (ret)
 921                                return ret;
 922                }
 923
 924                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 925                        flush_chipset |= i915_gem_clflush_object(obj, false);
 926
 927                flush_domains |= obj->base.write_domain;
 928        }
 929
 930        if (flush_chipset)
 931                i915_gem_chipset_flush(req->ring->dev);
 932
 933        if (flush_domains & I915_GEM_DOMAIN_GTT)
 934                wmb();
 935
 936        /* Unconditionally invalidate gpu caches and ensure that we do flush
 937         * any residual writes from the previous batch.
 938         */
 939        return intel_ring_invalidate_all_caches(req);
 940}
 941
 942static bool
 943i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 944{
 945        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
 946                return false;
 947
 948        /* Kernel clipping was a DRI1 misfeature */
 949        if (exec->num_cliprects || exec->cliprects_ptr)
 950                return false;
 951
 952        if (exec->DR4 == 0xffffffff) {
 953                DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
 954                exec->DR4 = 0;
 955        }
 956        if (exec->DR1 || exec->DR4)
 957                return false;
 958
 959        if ((exec->batch_start_offset | exec->batch_len) & 0x7)
 960                return false;
 961
 962        return true;
 963}
 964
 965static int
 966validate_exec_list(struct drm_device *dev,
 967                   struct drm_i915_gem_exec_object2 *exec,
 968                   int count)
 969{
 970        unsigned relocs_total = 0;
 971        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 972        unsigned invalid_flags;
 973        int i;
 974
 975        invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 976        if (USES_FULL_PPGTT(dev))
 977                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 978
 979        for (i = 0; i < count; i++) {
 980                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 981                int length; /* limited by fault_in_pages_readable() */
 982
 983                if (exec[i].flags & invalid_flags)
 984                        return -EINVAL;
 985
 986                if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 987                        return -EINVAL;
 988
 989                /* First check for malicious input causing overflow in
 990                 * the worst case where we need to allocate the entire
 991                 * relocation tree as a single array.
 992                 */
 993                if (exec[i].relocation_count > relocs_max - relocs_total)
 994                        return -EINVAL;
 995                relocs_total += exec[i].relocation_count;
 996
 997                length = exec[i].relocation_count *
 998                        sizeof(struct drm_i915_gem_relocation_entry);
 999                /*
1000                 * We must check that the entire relocation array is safe
1001                 * to read, but since we may need to update the presumed
1002                 * offsets during execution, check for full write access.
1003                 */
1004                if (!access_ok(VERIFY_WRITE, ptr, length))
1005                        return -EFAULT;
1006
1007                if (likely(!i915.prefault_disable)) {
1008                        if (fault_in_multipages_readable(ptr, length))
1009                                return -EFAULT;
1010                }
1011        }
1012
1013        return 0;
1014}
1015
1016static struct intel_context *
1017i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1018                          struct intel_engine_cs *ring, const u32 ctx_id)
1019{
1020        struct intel_context *ctx = NULL;
1021        struct i915_ctx_hang_stats *hs;
1022
1023        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1024                return ERR_PTR(-EINVAL);
1025
1026        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
1027        if (IS_ERR(ctx))
1028                return ctx;
1029
1030        hs = &ctx->hang_stats;
1031        if (hs->banned) {
1032                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1033                return ERR_PTR(-EIO);
1034        }
1035
1036        if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1037                int ret = intel_lr_context_deferred_alloc(ctx, ring);
1038                if (ret) {
1039                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1040                        return ERR_PTR(ret);
1041                }
1042        }
1043
1044        return ctx;
1045}
1046
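/*
 * Once the batch is queued, commit the pending read/write domains computed
 * during relocation and mark each vma as active on the request, so that
 * retiring, fence tracking and frontbuffer tracking all see the new work.
 */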
1047void
1048i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1049                                   struct drm_i915_gem_request *req)
1050{
1051        struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1052        struct i915_vma *vma;
1053
1054        list_for_each_entry(vma, vmas, exec_list) {
1055                struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1056                struct drm_i915_gem_object *obj = vma->obj;
1057                u32 old_read = obj->base.read_domains;
1058                u32 old_write = obj->base.write_domain;
1059
1060                obj->dirty = 1; /* be paranoid  */
1061                obj->base.write_domain = obj->base.pending_write_domain;
1062                if (obj->base.write_domain == 0)
1063                        obj->base.pending_read_domains |= obj->base.read_domains;
1064                obj->base.read_domains = obj->base.pending_read_domains;
1065
1066                i915_vma_move_to_active(vma, req);
1067                if (obj->base.write_domain) {
1068                        i915_gem_request_assign(&obj->last_write_req, req);
1069
1070                        intel_fb_obj_invalidate(obj, ORIGIN_CS);
1071
1072                        /* update for the implicit flush after a batch */
1073                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1074                }
1075                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1076                        i915_gem_request_assign(&obj->last_fenced_req, req);
1077                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1078                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
1079                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1080                                               &dev_priv->mm.fence_list);
1081                        }
1082                }
1083
1084                trace_i915_gem_object_change_domain(obj, old_read, old_write);
1085        }
1086}
1087
1088void
1089i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1090{
1091        /* Unconditionally force add_request to emit a full flush. */
1092        params->ring->gpu_caches_dirty = true;
1093
1094        /* Add a breadcrumb for the completion of the batch buffer */
1095        __i915_add_request(params->request, params->batch_obj, true);
1096}
1097
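/*
 * I915_EXEC_GEN7_SOL_RESET: zero the four GEN7_SO_WRITE_OFFSET registers
 * with MI_LOAD_REGISTER_IMM before running the batch.  Each write takes
 * three dwords, hence the intel_ring_begin(req, 4 * 3) below.
 */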
1098static int
1099i915_reset_gen7_sol_offsets(struct drm_device *dev,
1100                            struct drm_i915_gem_request *req)
1101{
1102        struct intel_engine_cs *ring = req->ring;
1103        struct drm_i915_private *dev_priv = dev->dev_private;
1104        int ret, i;
1105
1106        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1107                DRM_DEBUG("sol reset is gen7/rcs only\n");
1108                return -EINVAL;
1109        }
1110
1111        ret = intel_ring_begin(req, 4 * 3);
1112        if (ret)
1113                return ret;
1114
1115        for (i = 0; i < 4; i++) {
1116                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1117                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1118                intel_ring_emit(ring, 0);
1119        }
1120
1121        intel_ring_advance(ring);
1122
1123        return 0;
1124}
1125
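/*
 * Run the batch through the command parser.  The user batch is copied into a
 * shadow bo taken from the ring's batch pool, which is pinned and added to
 * the eb list (flagged __EXEC_OBJECT_HAS_PIN) so it gets unreserved and
 * released along with the other objects.  -EACCES from the parser means an
 * unhandled chained batch, in which case the original, unparsed batch is
 * used instead.
 */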
1126static struct drm_i915_gem_object*
1127i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1128                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1129                          struct eb_vmas *eb,
1130                          struct drm_i915_gem_object *batch_obj,
1131                          u32 batch_start_offset,
1132                          u32 batch_len,
1133                          bool is_master)
1134{
1135        struct drm_i915_gem_object *shadow_batch_obj;
1136        struct i915_vma *vma;
1137        int ret;
1138
1139        shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
1140                                                   PAGE_ALIGN(batch_len));
1141        if (IS_ERR(shadow_batch_obj))
1142                return shadow_batch_obj;
1143
1144        ret = i915_parse_cmds(ring,
1145                              batch_obj,
1146                              shadow_batch_obj,
1147                              batch_start_offset,
1148                              batch_len,
1149                              is_master);
1150        if (ret)
1151                goto err;
1152
1153        ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1154        if (ret)
1155                goto err;
1156
1157        i915_gem_object_unpin_pages(shadow_batch_obj);
1158
1159        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1160
1161        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1162        vma->exec_entry = shadow_exec_entry;
1163        vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1164        drm_gem_object_reference(&shadow_batch_obj->base);
1165        list_add_tail(&vma->exec_list, &eb->vmas);
1166
1167        shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1168
1169        return shadow_batch_obj;
1170
1171err:
1172        i915_gem_object_unpin_pages(shadow_batch_obj);
1173        if (ret == -EACCES) /* unhandled chained batch */
1174                return batch_obj;
1175        else
1176                return ERR_PTR(ret);
1177}
1178
1179int
1180i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1181                               struct drm_i915_gem_execbuffer2 *args,
1182                               struct list_head *vmas)
1183{
1184        struct drm_device *dev = params->dev;
1185        struct intel_engine_cs *ring = params->ring;
1186        struct drm_i915_private *dev_priv = dev->dev_private;
1187        u64 exec_start, exec_len;
1188        int instp_mode;
1189        u32 instp_mask;
1190        int ret;
1191
1192        ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1193        if (ret)
1194                return ret;
1195
1196        ret = i915_switch_context(params->request);
1197        if (ret)
1198                return ret;
1199
1200        WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1201             "%s didn't clear reload\n", ring->name);
1202
1203        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1204        instp_mask = I915_EXEC_CONSTANTS_MASK;
1205        switch (instp_mode) {
1206        case I915_EXEC_CONSTANTS_REL_GENERAL:
1207        case I915_EXEC_CONSTANTS_ABSOLUTE:
1208        case I915_EXEC_CONSTANTS_REL_SURFACE:
1209                if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1210                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1211                        return -EINVAL;
1212                }
1213
1214                if (instp_mode != dev_priv->relative_constants_mode) {
1215                        if (INTEL_INFO(dev)->gen < 4) {
1216                                DRM_DEBUG("no rel constants on pre-gen4\n");
1217                                return -EINVAL;
1218                        }
1219
1220                        if (INTEL_INFO(dev)->gen > 5 &&
1221                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1222                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1223                                return -EINVAL;
1224                        }
1225
 1226                        /* The HW changed the meaning of this bit on gen6 */
1227                        if (INTEL_INFO(dev)->gen >= 6)
1228                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1229                }
1230                break;
1231        default:
1232                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1233                return -EINVAL;
1234        }
1235
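        /*
         * The relative constants mode is programmed through INSTPM, a masked
         * register: the high 16 bits select which of the low bits are
         * actually updated, hence the instp_mask << 16 | instp_mode below.
         */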
1236        if (ring == &dev_priv->ring[RCS] &&
1237            instp_mode != dev_priv->relative_constants_mode) {
1238                ret = intel_ring_begin(params->request, 4);
1239                if (ret)
1240                        return ret;
1241
1242                intel_ring_emit(ring, MI_NOOP);
1243                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1244                intel_ring_emit(ring, INSTPM);
1245                intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1246                intel_ring_advance(ring);
1247
1248                dev_priv->relative_constants_mode = instp_mode;
1249        }
1250
1251        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1252                ret = i915_reset_gen7_sol_offsets(dev, params->request);
1253                if (ret)
1254                        return ret;
1255        }
1256
1257        exec_len   = args->batch_len;
1258        exec_start = params->batch_obj_vm_offset +
1259                     params->args_batch_start_offset;
1260
1261        ret = ring->dispatch_execbuffer(params->request,
1262                                        exec_start, exec_len,
1263                                        params->dispatch_flags);
1264        if (ret)
1265                return ret;
1266
1267        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1268
1269        i915_gem_execbuffer_move_to_active(vmas, params->request);
1270        i915_gem_execbuffer_retire_commands(params);
1271
1272        return 0;
1273}
1274
1275/**
 1276 * Find one BSD ring on which to dispatch the corresponding BSD command.
 1277 * The ring id is returned.
1278 */
1279static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1280                                  struct drm_file *file)
1281{
1282        struct drm_i915_private *dev_priv = dev->dev_private;
1283        struct drm_i915_file_private *file_priv = file->driver_priv;
1284
1285        /* Check whether the file_priv is using one ring */
1286        if (file_priv->bsd_ring)
1287                return file_priv->bsd_ring->id;
1288        else {
 1289                /* If not, use the ping-pong mechanism to select one ring */
1290                int ring_id;
1291
1292                mutex_lock(&dev->struct_mutex);
1293                if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1294                        ring_id = VCS;
1295                        dev_priv->mm.bsd_ring_dispatch_index = 1;
1296                } else {
1297                        ring_id = VCS2;
1298                        dev_priv->mm.bsd_ring_dispatch_index = 0;
1299                }
1300                file_priv->bsd_ring = &dev_priv->ring[ring_id];
1301                mutex_unlock(&dev->struct_mutex);
1302                return ring_id;
1303        }
1304}
1305
1306static struct drm_i915_gem_object *
1307eb_get_batch(struct eb_vmas *eb)
1308{
1309        struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1310
1311        /*
1312         * SNA is doing fancy tricks with compressing batch buffers, which leads
1313         * to negative relocation deltas. Usually that works out ok since the
1314         * relocate address is still positive, except when the batch is placed
1315         * very low in the GTT. Ensure this doesn't happen.
1316         *
1317         * Note that actual hangs have only been observed on gen7, but for
1318         * paranoia do it everywhere.
1319         */
1320        vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1321
1322        return vma->obj;
1323}
1324
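/*
 * Main execbuffer path: validate the arguments, look up every object,
 * reserve them in the chosen address space, apply relocations (falling back
 * to the slow path on -EFAULT), optionally run the batch through the command
 * parser and finally dispatch it on the selected ring.
 */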
1325static int
1326i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1327                       struct drm_file *file,
1328                       struct drm_i915_gem_execbuffer2 *args,
1329                       struct drm_i915_gem_exec_object2 *exec)
1330{
1331        struct drm_i915_private *dev_priv = dev->dev_private;
1332        struct eb_vmas *eb;
1333        struct drm_i915_gem_object *batch_obj;
1334        struct drm_i915_gem_exec_object2 shadow_exec_entry;
1335        struct intel_engine_cs *ring;
1336        struct intel_context *ctx;
1337        struct i915_address_space *vm;
1338        struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1339        struct i915_execbuffer_params *params = &params_master;
1340        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1341        u32 dispatch_flags;
1342        int ret;
1343        bool need_relocs;
1344
1345        if (!i915_gem_check_execbuffer(args))
1346                return -EINVAL;
1347
1348        ret = validate_exec_list(dev, exec, args->buffer_count);
1349        if (ret)
1350                return ret;
1351
1352        dispatch_flags = 0;
1353        if (args->flags & I915_EXEC_SECURE) {
1354                if (!file->is_master || !capable(CAP_SYS_ADMIN))
1355                    return -EPERM;
1356
1357                dispatch_flags |= I915_DISPATCH_SECURE;
1358        }
1359        if (args->flags & I915_EXEC_IS_PINNED)
1360                dispatch_flags |= I915_DISPATCH_PINNED;
1361
1362        if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1363                DRM_DEBUG("execbuf with unknown ring: %d\n",
1364                          (int)(args->flags & I915_EXEC_RING_MASK));
1365                return -EINVAL;
1366        }
1367
1368        if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
1369            ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1370                DRM_DEBUG("execbuf with non bsd ring but with invalid "
1371                        "bsd dispatch flags: %d\n", (int)(args->flags));
1372                return -EINVAL;
1373        } 
1374
1375        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1376                ring = &dev_priv->ring[RCS];
1377        else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1378                if (HAS_BSD2(dev)) {
1379                        int ring_id;
1380
1381                        switch (args->flags & I915_EXEC_BSD_MASK) {
1382                        case I915_EXEC_BSD_DEFAULT:
1383                                ring_id = gen8_dispatch_bsd_ring(dev, file);
1384                                ring = &dev_priv->ring[ring_id];
1385                                break;
1386                        case I915_EXEC_BSD_RING1:
1387                                ring = &dev_priv->ring[VCS];
1388                                break;
1389                        case I915_EXEC_BSD_RING2:
1390                                ring = &dev_priv->ring[VCS2];
1391                                break;
1392                        default:
1393                                DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
1394                                          (int)(args->flags & I915_EXEC_BSD_MASK));
1395                                return -EINVAL;
1396                        }
1397                } else
1398                        ring = &dev_priv->ring[VCS];
1399        } else
1400                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1401
1402        if (!intel_ring_initialized(ring)) {
1403                DRM_DEBUG("execbuf with invalid ring: %d\n",
1404                          (int)(args->flags & I915_EXEC_RING_MASK));
1405                return -EINVAL;
1406        }
1407
1408        if (args->buffer_count < 1) {
1409                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1410                return -EINVAL;
1411        }
1412
1413        if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1414                if (!HAS_RESOURCE_STREAMER(dev)) {
1415                        DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1416                        return -EINVAL;
1417                }
1418                if (ring->id != RCS) {
1419                        DRM_DEBUG("RS is not available on %s\n",
1420                                 ring->name);
1421                        return -EINVAL;
1422                }
1423
1424                dispatch_flags |= I915_DISPATCH_RS;
1425        }
1426
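            /*
             * Hold a runtime PM reference for the whole submission; the
             * matching intel_runtime_pm_put() is in the exit paths below.
             */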
1427        intel_runtime_pm_get(dev_priv);
1428
1429        ret = i915_mutex_lock_interruptible(dev);
1430        if (ret)
1431                goto pre_mutex_err;
1432
1433        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1434        if (IS_ERR(ctx)) {
1435                mutex_unlock(&dev->struct_mutex);
1436                ret = PTR_ERR(ctx);
1437                goto pre_mutex_err;
1438        }
1439
1440        i915_gem_context_reference(ctx);
1441
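            /*
             * Execute in the context's own ppgtt when it has one; otherwise
             * fall back to the single global GTT address space.
             */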
1442        if (ctx->ppgtt)
1443                vm = &ctx->ppgtt->base;
1444        else
1445                vm = &dev_priv->gtt.base;
1446
1447        memset(&params_master, 0x00, sizeof(params_master));
1448
1449        eb = eb_create(args);
1450        if (eb == NULL) {
1451                i915_gem_context_unreference(ctx);
1452                mutex_unlock(&dev->struct_mutex);
1453                ret = -ENOMEM;
1454                goto pre_mutex_err;
1455        }
1456
1457        /* Look up object handles */
1458        ret = eb_lookup_vmas(eb, exec, args, vm, file);
1459        if (ret)
1460                goto err;
1461
1462        /* Take note of the batch buffer before we might reorder the lists;
             * by ABI convention it is the last entry in the exec object list. */
1463        batch_obj = eb_get_batch(eb);
1464
1465        /* Move the objects en-masse into the GTT, evicting if necessary. */
1466        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1467        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
1468        if (ret)
1469                goto err;
1470
1471        /* The objects are in their final locations, apply the relocations. */
1472        if (need_relocs)
1473                ret = i915_gem_execbuffer_relocate(eb);
1474        if (ret) {
1475                if (ret == -EFAULT) {
1476                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1477                                                                eb, exec, ctx);
1478                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1479                }
1480                if (ret)
1481                        goto err;
1482        }
1483
1484        /* The batch buffer is only ever read by the GPU; reject
             * self-modifying batches. */
1485        if (batch_obj->base.pending_write_domain) {
1486                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1487                ret = -EINVAL;
1488                goto err;
1489        }
1490
1491        params->args_batch_start_offset = args->batch_start_offset;
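            /*
             * Where required, run the batch through the command parser: the
             * batch is copied into a kernel-owned shadow buffer and scanned,
             * and on success the shadow copy is what actually gets executed.
             */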
1492        if (i915_needs_cmd_parser(ring) && args->batch_len) {
1493                struct drm_i915_gem_object *parsed_batch_obj;
1494
1495                parsed_batch_obj = i915_gem_execbuffer_parse(ring,
1496                                                      &shadow_exec_entry,
1497                                                      eb,
1498                                                      batch_obj,
1499                                                      args->batch_start_offset,
1500                                                      args->batch_len,
1501                                                      file->is_master);
1502                if (IS_ERR(parsed_batch_obj)) {
1503                        ret = PTR_ERR(parsed_batch_obj);
1504                        goto err;
1505                }
1506
1507                /*
1508                 * parsed_batch_obj == batch_obj means batch not fully parsed:
1509                 * Accept, but don't promote to secure.
1510                 */
1511
1512                if (parsed_batch_obj != batch_obj) {
1513                        /*
1514                         * Batch parsed and accepted:
1515                         *
1516                         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1517                         * bit from MI_BATCH_BUFFER_START commands issued in
1518                         * the dispatch_execbuffer implementations. We
1519                         * specifically don't want that set on batches the
1520                         * command parser has accepted.
1521                         */
1522                        dispatch_flags |= I915_DISPATCH_SECURE;
1523                        params->args_batch_start_offset = 0;
1524                        batch_obj = parsed_batch_obj;
1525                }
1526        }
1527
1528        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1529
1530        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1531         * batch" bit. Hence we need to pin secure batches into the global gtt.
1532         * hsw should have this fixed, but bdw mucks it up again. */
1533        if (dispatch_flags & I915_DISPATCH_SECURE) {
1534                /*
1535                 * At first glance it may look odd that we pin the batch here,
1536                 * outside of the reservation loop. But:
1537                 * - The batch is already pinned into the relevant ppgtt, so we
1538                 *   already have the backing storage fully allocated.
1539                 * - No other BO uses the global gtt (well contexts, but meh),
1540                 *   so we don't really have issues with multiple objects not
1541                 *   fitting due to fragmentation.
1542                 * So this is actually safe.
1543                 */
1544                ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1545                if (ret)
1546                        goto err;
1547
1548                params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1549        } else
1550                params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1551
1552        /* Allocate a request for this batch buffer nice and early. */
1553        ret = i915_gem_request_alloc(ring, ctx, &params->request);
1554        if (ret)
1555                goto err_batch_unpin;
1556
1557        ret = i915_gem_request_add_to_client(params->request, file);
1558        if (ret)
1559                goto err_batch_unpin;
1560
1561        /*
1562         * Save assorted stuff away to pass through to *_submission().
1563         * NB: This data should be 'persistent' and not local as it will
1564         * be kept around beyond the duration of the IOCTL once the GPU
1565         * scheduler arrives.
1566         */
1567        params->dev                     = dev;
1568        params->file                    = file;
1569        params->ring                    = ring;
1570        params->dispatch_flags          = dispatch_flags;
1571        params->batch_obj               = batch_obj;
1572        params->ctx                     = ctx;
1573
1574        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1575
1576err_batch_unpin:
1577        /*
1578         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1579         * batch vma for correctness. To be less ugly and less fragile, this
1580         * needs to be adjusted to also track the ggtt batch vma properly as
1581         * active.
1582         */
1583        if (dispatch_flags & I915_DISPATCH_SECURE)
1584                i915_gem_object_ggtt_unpin(batch_obj);
1585
1586err:
1587        /* the request owns the ref now */
1588        i915_gem_context_unreference(ctx);
1589        eb_destroy(eb);
1590
1591        /*
1592         * If the request was created but not successfully submitted then it
1593         * must be freed again. If it was submitted then it is being tracked
1594         * on the active request list and no clean up is required here.
1595         */
1596        if (ret && params->request)
1597                i915_gem_request_cancel(params->request);
1598
1599        mutex_unlock(&dev->struct_mutex);
1600
1601pre_mutex_err:
1602        /* intel_gpu_busy should also get a ref, so it will be freed when the device
1603         * is really idle. */
1604        intel_runtime_pm_put(dev_priv);
1605        return ret;
1606}
1607
1608/*
1609 * Legacy execbuffer just creates an exec2 list from the original exec object
1610 * list array and passes it to the real function.
1611 */
1612int
1613i915_gem_execbuffer(struct drm_device *dev, void *data,
1614                    struct drm_file *file)
1615{
1616        struct drm_i915_gem_execbuffer *args = data;
1617        struct drm_i915_gem_execbuffer2 exec2;
1618        struct drm_i915_gem_exec_object *exec_list = NULL;
1619        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1620        int ret, i;
1621
1622        if (args->buffer_count < 1) {
1623                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1624                return -EINVAL;
1625        }
1626
1627        /* Copy in the exec list from userland */
1628        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1629        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1630        if (exec_list == NULL || exec2_list == NULL) {
1631                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1632                          args->buffer_count);
1633                drm_free_large(exec_list);
1634                drm_free_large(exec2_list);
1635                return -ENOMEM;
1636        }
1637        ret = copy_from_user(exec_list,
1638                             to_user_ptr(args->buffers_ptr),
1639                             sizeof(*exec_list) * args->buffer_count);
1640        if (ret != 0) {
1641                DRM_DEBUG("copy %d exec entries failed %d\n",
1642                          args->buffer_count, ret);
1643                drm_free_large(exec_list);
1644                drm_free_large(exec2_list);
1645                return -EFAULT;
1646        }
1647
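            /*
             * Translate each legacy exec object into its exec2 equivalent.
             * Pre-gen4 hardware needs fence registers for tiled access, so
             * every buffer is marked as requiring a fence there.
             */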
1648        for (i = 0; i < args->buffer_count; i++) {
1649                exec2_list[i].handle = exec_list[i].handle;
1650                exec2_list[i].relocation_count = exec_list[i].relocation_count;
1651                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1652                exec2_list[i].alignment = exec_list[i].alignment;
1653                exec2_list[i].offset = exec_list[i].offset;
1654                if (INTEL_INFO(dev)->gen < 4)
1655                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1656                else
1657                        exec2_list[i].flags = 0;
1658        }
1659
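            /*
             * The legacy ioctl has no ring or context selection: everything
             * is submitted to the render ring under the default context.
             */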
1660        exec2.buffers_ptr = args->buffers_ptr;
1661        exec2.buffer_count = args->buffer_count;
1662        exec2.batch_start_offset = args->batch_start_offset;
1663        exec2.batch_len = args->batch_len;
1664        exec2.DR1 = args->DR1;
1665        exec2.DR4 = args->DR4;
1666        exec2.num_cliprects = args->num_cliprects;
1667        exec2.cliprects_ptr = args->cliprects_ptr;
1668        exec2.flags = I915_EXEC_RENDER;
1669        i915_execbuffer2_set_context_id(exec2, 0);
1670
1671        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1672        if (!ret) {
1673                struct drm_i915_gem_exec_object __user *user_exec_list =
1674                        to_user_ptr(args->buffers_ptr);
1675
1676                /* Copy the new buffer offsets back to the user's exec list. */
1677                for (i = 0; i < args->buffer_count; i++) {
1678                        ret = __copy_to_user(&user_exec_list[i].offset,
1679                                             &exec2_list[i].offset,
1680                                             sizeof(user_exec_list[i].offset));
1681                        if (ret) {
1682                                ret = -EFAULT;
1683                                DRM_DEBUG("failed to copy %d exec entries "
1684                                          "back to user (%d)\n",
1685                                          args->buffer_count, ret);
1686                                break;
1687                        }
1688                }
1689        }
1690
1691        drm_free_large(exec_list);
1692        drm_free_large(exec2_list);
1693        return ret;
1694}
1695
1696int
1697i915_gem_execbuffer2(struct drm_device *dev, void *data,
1698                     struct drm_file *file)
1699{
1700        struct drm_i915_gem_execbuffer2 *args = data;
1701        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1702        int ret;
1703
1704        if (args->buffer_count < 1 ||
1705            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1706                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1707                return -EINVAL;
1708        }
1709
1710        if (args->rsvd2 != 0) {
1711                DRM_DEBUG("dirty rsvd2 field\n");
1712                return -EINVAL;
1713        }
1714
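            /*
             * Try a cheap kmalloc first; for large exec lists fall back to
             * drm_malloc_ab(), which may vmalloc instead.
             */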
1715        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1716                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1717        if (exec2_list == NULL)
1718                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1719                                           args->buffer_count);
1720        if (exec2_list == NULL) {
1721                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1722                          args->buffer_count);
1723                return -ENOMEM;
1724        }
1725        ret = copy_from_user(exec2_list,
1726                             to_user_ptr(args->buffers_ptr),
1727                             sizeof(*exec2_list) * args->buffer_count);
1728        if (ret != 0) {
1729                DRM_DEBUG("copy %d exec entries failed %d\n",
1730                          args->buffer_count, ret);
1731                drm_free_large(exec2_list);
1732                return -EFAULT;
1733        }
1734
1735        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1736        if (!ret) {
1737                /* Copy the new buffer offsets back to the user's exec list. */
1738                struct drm_i915_gem_exec_object2 __user *user_exec_list =
1739                                   to_user_ptr(args->buffers_ptr);
1740                int i;
1741
1742                for (i = 0; i < args->buffer_count; i++) {
1743                        ret = __copy_to_user(&user_exec_list[i].offset,
1744                                             &exec2_list[i].offset,
1745                                             sizeof(user_exec_list[i].offset));
1746                        if (ret) {
1747                                ret = -EFAULT;
1748                                DRM_DEBUG("failed to copy %d exec entries "
1749                                          "back to user\n",
1750                                          args->buffer_count);
1751                                break;
1752                        }
1753                }
1754        }
1755
1756        drm_free_large(exec2_list);
1757        return ret;
1758}
1759