linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c
   1/*
   2 * Copyright © 2008,2010 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Chris Wilson <chris@chris-wilson.co.uk>
  26 *
  27 */
  28
  29#include <drm/drmP.h>
  30#include <drm/i915_drm.h>
  31#include "i915_drv.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34#include <linux/dma_remapping.h>
  35#include <linux/uaccess.h>
  36
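/*
 * Internal bookkeeping flags, kept in the otherwise unused high bits of
 * drm_i915_gem_exec_object2.flags so they cannot collide with the uapi
 * EXEC_OBJECT_* flags in the low bits.
 */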
  37#define  __EXEC_OBJECT_HAS_PIN (1<<31)
  38#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
  39#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
  40#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
  41
  42#define BATCH_OFFSET_BIAS (256*1024)
  43
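/*
 * Per-execbuf bookkeeping for the objects' vmas.  Handle lookup uses either
 * a flat table indexed by buffer position (when userspace passes
 * I915_EXEC_HANDLE_LUT, signalled by eb->and < 0) or a small hash table
 * whose bucket mask is stored in eb->and.
 */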
  44struct eb_vmas {
  45        struct list_head vmas;
  46        int and;
  47        union {
  48                struct i915_vma *lut[0];
  49                struct hlist_head buckets[0];
  50        };
  51};
  52
  53static struct eb_vmas *
  54eb_create(struct drm_i915_gem_execbuffer2 *args)
  55{
  56        struct eb_vmas *eb = NULL;
  57
  58        if (args->flags & I915_EXEC_HANDLE_LUT) {
  59                unsigned size = args->buffer_count;
  60                size *= sizeof(struct i915_vma *);
  61                size += sizeof(struct eb_vmas);
  62                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  63        }
  64
  65        if (eb == NULL) {
  66                unsigned size = args->buffer_count;
  67                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  68                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
  69                while (count > 2*size)
  70                        count >>= 1;
  71                eb = kzalloc(count*sizeof(struct hlist_head) +
  72                             sizeof(struct eb_vmas),
  73                             GFP_TEMPORARY);
  74                if (eb == NULL)
  75                        return eb;
  76
  77                eb->and = count - 1;
  78        } else
  79                eb->and = -args->buffer_count;
  80
  81        INIT_LIST_HEAD(&eb->vmas);
  82        return eb;
  83}
  84
  85static void
  86eb_reset(struct eb_vmas *eb)
  87{
  88        if (eb->and >= 0)
  89                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  90}
  91
  92static int
  93eb_lookup_vmas(struct eb_vmas *eb,
  94               struct drm_i915_gem_exec_object2 *exec,
  95               const struct drm_i915_gem_execbuffer2 *args,
  96               struct i915_address_space *vm,
  97               struct drm_file *file)
  98{
  99        struct drm_i915_gem_object *obj;
 100        struct list_head objects;
 101        int i, ret;
 102
 103        INIT_LIST_HEAD(&objects);
 104        spin_lock(&file->table_lock);
  105        /* Grab a reference to the object and release the lock so we can look up
  106         * or create the VMA without using GFP_ATOMIC. */
 107        for (i = 0; i < args->buffer_count; i++) {
 108                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 109                if (obj == NULL) {
 110                        spin_unlock(&file->table_lock);
 111                        DRM_DEBUG("Invalid object handle %d at index %d\n",
 112                                   exec[i].handle, i);
 113                        ret = -ENOENT;
 114                        goto err;
 115                }
 116
 117                if (!list_empty(&obj->obj_exec_link)) {
 118                        spin_unlock(&file->table_lock);
 119                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 120                                   obj, exec[i].handle, i);
 121                        ret = -EINVAL;
 122                        goto err;
 123                }
 124
 125                drm_gem_object_reference(&obj->base);
 126                list_add_tail(&obj->obj_exec_link, &objects);
 127        }
 128        spin_unlock(&file->table_lock);
 129
 130        i = 0;
 131        while (!list_empty(&objects)) {
 132                struct i915_vma *vma;
 133
 134                obj = list_first_entry(&objects,
 135                                       struct drm_i915_gem_object,
 136                                       obj_exec_link);
 137
 138                /*
 139                 * NOTE: We can leak any vmas created here when something fails
 140                 * later on. But that's no issue since vma_unbind can deal with
 141                 * vmas which are not actually bound. And since only
 142                 * lookup_or_create exists as an interface to get at the vma
 143                 * from the (obj, vm) we don't run the risk of creating
 144                 * duplicated vmas for the same vm.
 145                 */
 146                vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 147                if (IS_ERR(vma)) {
 148                        DRM_DEBUG("Failed to lookup VMA\n");
 149                        ret = PTR_ERR(vma);
 150                        goto err;
 151                }
 152
 153                /* Transfer ownership from the objects list to the vmas list. */
 154                list_add_tail(&vma->exec_list, &eb->vmas);
 155                list_del_init(&obj->obj_exec_link);
 156
 157                vma->exec_entry = &exec[i];
 158                if (eb->and < 0) {
 159                        eb->lut[i] = vma;
 160                } else {
 161                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
 162                        vma->exec_handle = handle;
 163                        hlist_add_head(&vma->exec_node,
 164                                       &eb->buckets[handle & eb->and]);
 165                }
 166                ++i;
 167        }
 168
 169        return 0;
 170
 171
 172err:
 173        while (!list_empty(&objects)) {
 174                obj = list_first_entry(&objects,
 175                                       struct drm_i915_gem_object,
 176                                       obj_exec_link);
 177                list_del_init(&obj->obj_exec_link);
 178                drm_gem_object_unreference(&obj->base);
 179        }
 180        /*
  181         * Objects already transferred to the vmas list will be unreferenced by
 182         * eb_destroy.
 183         */
 184
 185        return ret;
 186}
 187
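/* Resolve a handle (or LUT index when I915_EXEC_HANDLE_LUT is used) to its vma. */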
 188static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 189{
 190        if (eb->and < 0) {
 191                if (handle >= -eb->and)
 192                        return NULL;
 193                return eb->lut[handle];
 194        } else {
 195                struct hlist_head *head;
 196                struct i915_vma *vma;
 197
 198                head = &eb->buckets[handle & eb->and];
 199                hlist_for_each_entry(vma, head, exec_node) {
 200                        if (vma->exec_handle == handle)
 201                                return vma;
 202                }
 203                return NULL;
 204        }
 205}
 206
 207static void
 208i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 209{
 210        struct drm_i915_gem_exec_object2 *entry;
 211        struct drm_i915_gem_object *obj = vma->obj;
 212
 213        if (!drm_mm_node_allocated(&vma->node))
 214                return;
 215
 216        entry = vma->exec_entry;
 217
 218        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 219                i915_gem_object_unpin_fence(obj);
 220
 221        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 222                vma->pin_count--;
 223
 224        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 225}
 226
 227static void eb_destroy(struct eb_vmas *eb)
 228{
 229        while (!list_empty(&eb->vmas)) {
 230                struct i915_vma *vma;
 231
 232                vma = list_first_entry(&eb->vmas,
 233                                       struct i915_vma,
 234                                       exec_list);
 235                list_del_init(&vma->exec_list);
 236                i915_gem_execbuffer_unreserve_vma(vma);
 237                drm_gem_object_unreference(&vma->obj->base);
 238        }
 239        kfree(eb);
 240}
 241
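/*
 * Relocations may be written with the CPU when the object is coherent
 * (LLC or snooped) or already in the CPU write domain; otherwise we fall
 * back to the GTT or clflush paths below.
 */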
 242static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 243{
 244        return (HAS_LLC(obj->base.dev) ||
 245                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 246                obj->cache_level != I915_CACHE_NONE);
 247}
 248
 249/* Used to convert any address to canonical form.
 250 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 251 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 252 * addresses to be in a canonical form:
 253 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 254 * canonical form [63:48] == [47]."
 255 */
 256#define GEN8_HIGH_ADDRESS_BIT 47
 257static inline uint64_t gen8_canonical_addr(uint64_t address)
 258{
 259        return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
 260}
 261
 262static inline uint64_t gen8_noncanonical_addr(uint64_t address)
 263{
 264        return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
 265}
 266
 267static inline uint64_t
 268relocation_target(struct drm_i915_gem_relocation_entry *reloc,
 269                  uint64_t target_offset)
 270{
 271        return gen8_canonical_addr((int)reloc->delta + target_offset);
 272}
 273
 274static int
 275relocate_entry_cpu(struct drm_i915_gem_object *obj,
 276                   struct drm_i915_gem_relocation_entry *reloc,
 277                   uint64_t target_offset)
 278{
 279        struct drm_device *dev = obj->base.dev;
 280        uint32_t page_offset = offset_in_page(reloc->offset);
 281        uint64_t delta = relocation_target(reloc, target_offset);
 282        char *vaddr;
 283        int ret;
 284
 285        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 286        if (ret)
 287                return ret;
 288
 289        vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
 290                                reloc->offset >> PAGE_SHIFT));
 291        *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 292
 293        if (INTEL_INFO(dev)->gen >= 8) {
 294                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 295
 296                if (page_offset == 0) {
 297                        kunmap_atomic(vaddr);
 298                        vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
 299                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 300                }
 301
 302                *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
 303        }
 304
 305        kunmap_atomic(vaddr);
 306
 307        return 0;
 308}
 309
 310static int
 311relocate_entry_gtt(struct drm_i915_gem_object *obj,
 312                   struct drm_i915_gem_relocation_entry *reloc,
 313                   uint64_t target_offset)
 314{
 315        struct drm_device *dev = obj->base.dev;
 316        struct drm_i915_private *dev_priv = dev->dev_private;
 317        uint64_t delta = relocation_target(reloc, target_offset);
 318        uint64_t offset;
 319        void __iomem *reloc_page;
 320        int ret;
 321
 322        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 323        if (ret)
 324                return ret;
 325
 326        ret = i915_gem_object_put_fence(obj);
 327        if (ret)
 328                return ret;
 329
 330        /* Map the page containing the relocation we're going to perform.  */
 331        offset = i915_gem_obj_ggtt_offset(obj);
 332        offset += reloc->offset;
 333        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 334                                              offset & PAGE_MASK);
 335        iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 336
 337        if (INTEL_INFO(dev)->gen >= 8) {
 338                offset += sizeof(uint32_t);
 339
 340                if (offset_in_page(offset) == 0) {
 341                        io_mapping_unmap_atomic(reloc_page);
 342                        reloc_page =
 343                                io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 344                                                         offset);
 345                }
 346
 347                iowrite32(upper_32_bits(delta),
 348                          reloc_page + offset_in_page(offset));
 349        }
 350
 351        io_mapping_unmap_atomic(reloc_page);
 352
 353        return 0;
 354}
 355
 356static void
 357clflush_write32(void *addr, uint32_t value)
 358{
 359        /* This is not a fast path, so KISS. */
 360        drm_clflush_virt_range(addr, sizeof(uint32_t));
 361        *(uint32_t *)addr = value;
 362        drm_clflush_virt_range(addr, sizeof(uint32_t));
 363}
 364
 365static int
 366relocate_entry_clflush(struct drm_i915_gem_object *obj,
 367                       struct drm_i915_gem_relocation_entry *reloc,
 368                       uint64_t target_offset)
 369{
 370        struct drm_device *dev = obj->base.dev;
 371        uint32_t page_offset = offset_in_page(reloc->offset);
 372        uint64_t delta = relocation_target(reloc, target_offset);
 373        char *vaddr;
 374        int ret;
 375
 376        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 377        if (ret)
 378                return ret;
 379
 380        vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
 381                                reloc->offset >> PAGE_SHIFT));
 382        clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 383
 384        if (INTEL_INFO(dev)->gen >= 8) {
 385                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 386
 387                if (page_offset == 0) {
 388                        kunmap_atomic(vaddr);
 389                        vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
 390                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 391                }
 392
 393                clflush_write32(vaddr + page_offset, upper_32_bits(delta));
 394        }
 395
 396        kunmap_atomic(vaddr);
 397
 398        return 0;
 399}
 400
 401static int
 402i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 403                                   struct eb_vmas *eb,
 404                                   struct drm_i915_gem_relocation_entry *reloc)
 405{
 406        struct drm_device *dev = obj->base.dev;
 407        struct drm_gem_object *target_obj;
 408        struct drm_i915_gem_object *target_i915_obj;
 409        struct i915_vma *target_vma;
 410        uint64_t target_offset;
 411        int ret;
 412
  413        /* we already hold a reference to all valid objects */
 414        target_vma = eb_get_vma(eb, reloc->target_handle);
 415        if (unlikely(target_vma == NULL))
 416                return -ENOENT;
 417        target_i915_obj = target_vma->obj;
 418        target_obj = &target_vma->obj->base;
 419
 420        target_offset = gen8_canonical_addr(target_vma->node.start);
 421
 422        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 423         * pipe_control writes because the gpu doesn't properly redirect them
  424         * through the ppgtt for non-secure batchbuffers. */
 425        if (unlikely(IS_GEN6(dev) &&
 426            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 427                ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 428                                    PIN_GLOBAL);
 429                if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
 430                        return ret;
 431        }
 432
 433        /* Validate that the target is in a valid r/w GPU domain */
 434        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 435                DRM_DEBUG("reloc with multiple write domains: "
 436                          "obj %p target %d offset %d "
 437                          "read %08x write %08x",
 438                          obj, reloc->target_handle,
 439                          (int) reloc->offset,
 440                          reloc->read_domains,
 441                          reloc->write_domain);
 442                return -EINVAL;
 443        }
 444        if (unlikely((reloc->write_domain | reloc->read_domains)
 445                     & ~I915_GEM_GPU_DOMAINS)) {
 446                DRM_DEBUG("reloc with read/write non-GPU domains: "
 447                          "obj %p target %d offset %d "
 448                          "read %08x write %08x",
 449                          obj, reloc->target_handle,
 450                          (int) reloc->offset,
 451                          reloc->read_domains,
 452                          reloc->write_domain);
 453                return -EINVAL;
 454        }
 455
 456        target_obj->pending_read_domains |= reloc->read_domains;
 457        target_obj->pending_write_domain |= reloc->write_domain;
 458
 459        /* If the relocation already has the right value in it, no
 460         * more work needs to be done.
 461         */
 462        if (target_offset == reloc->presumed_offset)
 463                return 0;
 464
 465        /* Check that the relocation address is valid... */
 466        if (unlikely(reloc->offset >
 467                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 468                DRM_DEBUG("Relocation beyond object bounds: "
 469                          "obj %p target %d offset %d size %d.\n",
 470                          obj, reloc->target_handle,
 471                          (int) reloc->offset,
 472                          (int) obj->base.size);
 473                return -EINVAL;
 474        }
 475        if (unlikely(reloc->offset & 3)) {
 476                DRM_DEBUG("Relocation not 4-byte aligned: "
 477                          "obj %p target %d offset %d.\n",
 478                          obj, reloc->target_handle,
 479                          (int) reloc->offset);
 480                return -EINVAL;
 481        }
 482
 483        /* We can't wait for rendering with pagefaults disabled */
 484        if (obj->active && pagefault_disabled())
 485                return -EFAULT;
 486
 487        if (use_cpu_reloc(obj))
 488                ret = relocate_entry_cpu(obj, reloc, target_offset);
 489        else if (obj->map_and_fenceable)
 490                ret = relocate_entry_gtt(obj, reloc, target_offset);
 491        else if (cpu_has_clflush)
 492                ret = relocate_entry_clflush(obj, reloc, target_offset);
 493        else {
 494                WARN_ONCE(1, "Impossible case in relocation handling\n");
 495                ret = -ENODEV;
 496        }
 497
 498        if (ret)
 499                return ret;
 500
 501        /* and update the user's relocation entry */
 502        reloc->presumed_offset = target_offset;
 503
 504        return 0;
 505}
 506
 507static int
 508i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 509                                 struct eb_vmas *eb)
 510{
 511#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 512        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 513        struct drm_i915_gem_relocation_entry __user *user_relocs;
 514        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 515        int remain, ret;
 516
 517        user_relocs = to_user_ptr(entry->relocs_ptr);
 518
 519        remain = entry->relocation_count;
 520        while (remain) {
 521                struct drm_i915_gem_relocation_entry *r = stack_reloc;
 522                int count = remain;
 523                if (count > ARRAY_SIZE(stack_reloc))
 524                        count = ARRAY_SIZE(stack_reloc);
 525                remain -= count;
 526
 527                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
 528                        return -EFAULT;
 529
 530                do {
 531                        u64 offset = r->presumed_offset;
 532
 533                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
 534                        if (ret)
 535                                return ret;
 536
 537                        if (r->presumed_offset != offset &&
 538                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
 539                                                    &r->presumed_offset,
 540                                                    sizeof(r->presumed_offset))) {
 541                                return -EFAULT;
 542                        }
 543
 544                        user_relocs++;
 545                        r++;
 546                } while (--count);
 547        }
 548
 549        return 0;
 550#undef N_RELOC
 551}
 552
 553static int
 554i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 555                                      struct eb_vmas *eb,
 556                                      struct drm_i915_gem_relocation_entry *relocs)
 557{
 558        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 559        int i, ret;
 560
 561        for (i = 0; i < entry->relocation_count; i++) {
 562                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
 563                if (ret)
 564                        return ret;
 565        }
 566
 567        return 0;
 568}
 569
 570static int
 571i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 572{
 573        struct i915_vma *vma;
 574        int ret = 0;
 575
 576        /* This is the fast path and we cannot handle a pagefault whilst
 577         * holding the struct mutex lest the user pass in the relocations
  578         * contained within a mmapped bo, for in such a case the page
 579         * fault handler would call i915_gem_fault() and we would try to
 580         * acquire the struct mutex again. Obviously this is bad and so
 581         * lockdep complains vehemently.
 582         */
 583        pagefault_disable();
 584        list_for_each_entry(vma, &eb->vmas, exec_list) {
 585                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 586                if (ret)
 587                        break;
 588        }
 589        pagefault_enable();
 590
 591        return ret;
 592}
 593
 594static bool only_mappable_for_reloc(unsigned int flags)
 595{
 596        return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
 597                __EXEC_OBJECT_NEEDS_MAP;
 598}
 599
 600static int
 601i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 602                                struct intel_engine_cs *ring,
 603                                bool *need_reloc)
 604{
 605        struct drm_i915_gem_object *obj = vma->obj;
 606        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 607        uint64_t flags;
 608        int ret;
 609
 610        flags = PIN_USER;
 611        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 612                flags |= PIN_GLOBAL;
 613
 614        if (!drm_mm_node_allocated(&vma->node)) {
 615                /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
 616                 * limit address to the first 4GBs for unflagged objects.
 617                 */
 618                if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
 619                        flags |= PIN_ZONE_4G;
 620                if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 621                        flags |= PIN_GLOBAL | PIN_MAPPABLE;
 622                if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
 623                        flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 624                if (entry->flags & EXEC_OBJECT_PINNED)
 625                        flags |= entry->offset | PIN_OFFSET_FIXED;
 626                if ((flags & PIN_MAPPABLE) == 0)
 627                        flags |= PIN_HIGH;
 628        }
 629
 630        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
  631        if ((ret == -ENOSPC || ret == -E2BIG) &&
 632            only_mappable_for_reloc(entry->flags))
 633                ret = i915_gem_object_pin(obj, vma->vm,
 634                                          entry->alignment,
 635                                          flags & ~PIN_MAPPABLE);
 636        if (ret)
 637                return ret;
 638
 639        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 640
 641        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 642                ret = i915_gem_object_get_fence(obj);
 643                if (ret)
 644                        return ret;
 645
 646                if (i915_gem_object_pin_fence(obj))
 647                        entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 648        }
 649
 650        if (entry->offset != vma->node.start) {
 651                entry->offset = vma->node.start;
 652                *need_reloc = true;
 653        }
 654
 655        if (entry->flags & EXEC_OBJECT_WRITE) {
 656                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
 657                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 658        }
 659
 660        return 0;
 661}
 662
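/*
 * On non-LLC platforms, relocations into a GGTT-bound object are written
 * through the mappable aperture, so such objects must be placed where the
 * CPU can map them.
 */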
 663static bool
 664need_reloc_mappable(struct i915_vma *vma)
 665{
 666        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 667
 668        if (entry->relocation_count == 0)
 669                return false;
 670
 671        if (!vma->is_ggtt)
 672                return false;
 673
 674        /* See also use_cpu_reloc() */
 675        if (HAS_LLC(vma->obj->base.dev))
 676                return false;
 677
 678        if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 679                return false;
 680
 681        return true;
 682}
 683
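/*
 * Check whether an already bound vma still satisfies this execbuf's
 * placement constraints (alignment, fixed offset, bias, mappability and
 * the 4GiB zone); misplaced vmas are unbound and rebound during reserve.
 */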
 684static bool
 685eb_vma_misplaced(struct i915_vma *vma)
 686{
 687        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 688        struct drm_i915_gem_object *obj = vma->obj;
 689
 690        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 691
 692        if (entry->alignment &&
 693            vma->node.start & (entry->alignment - 1))
 694                return true;
 695
 696        if (entry->flags & EXEC_OBJECT_PINNED &&
 697            vma->node.start != entry->offset)
 698                return true;
 699
 700        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
 701            vma->node.start < BATCH_OFFSET_BIAS)
 702                return true;
 703
 704        /* avoid costly ping-pong once a batch bo ended up non-mappable */
 705        if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
 706                return !only_mappable_for_reloc(entry->flags);
 707
 708        if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
 709            (vma->node.start + vma->node.size - 1) >> 32)
 710                return true;
 711
 712        return false;
 713}
 714
 715static int
 716i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 717                            struct list_head *vmas,
 718                            struct intel_context *ctx,
 719                            bool *need_relocs)
 720{
 721        struct drm_i915_gem_object *obj;
 722        struct i915_vma *vma;
 723        struct i915_address_space *vm;
 724        struct list_head ordered_vmas;
 725        struct list_head pinned_vmas;
 726        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 727        int retry;
 728
 729        i915_gem_retire_requests_ring(ring);
 730
 731        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 732
 733        INIT_LIST_HEAD(&ordered_vmas);
 734        INIT_LIST_HEAD(&pinned_vmas);
 735        while (!list_empty(vmas)) {
 736                struct drm_i915_gem_exec_object2 *entry;
 737                bool need_fence, need_mappable;
 738
 739                vma = list_first_entry(vmas, struct i915_vma, exec_list);
 740                obj = vma->obj;
 741                entry = vma->exec_entry;
 742
 743                if (ctx->flags & CONTEXT_NO_ZEROMAP)
 744                        entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 745
 746                if (!has_fenced_gpu_access)
 747                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 748                need_fence =
 749                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 750                        obj->tiling_mode != I915_TILING_NONE;
 751                need_mappable = need_fence || need_reloc_mappable(vma);
 752
 753                if (entry->flags & EXEC_OBJECT_PINNED)
 754                        list_move_tail(&vma->exec_list, &pinned_vmas);
 755                else if (need_mappable) {
 756                        entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 757                        list_move(&vma->exec_list, &ordered_vmas);
 758                } else
 759                        list_move_tail(&vma->exec_list, &ordered_vmas);
 760
 761                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 762                obj->base.pending_write_domain = 0;
 763        }
 764        list_splice(&ordered_vmas, vmas);
 765        list_splice(&pinned_vmas, vmas);
 766
 767        /* Attempt to pin all of the buffers into the GTT.
 768         * This is done in 3 phases:
 769         *
 770         * 1a. Unbind all objects that do not match the GTT constraints for
 771         *     the execbuffer (fenceable, mappable, alignment etc).
 772         * 1b. Increment pin count for already bound objects.
 773         * 2.  Bind new objects.
 774         * 3.  Decrement pin count.
 775         *
  776         * This avoids unnecessary unbinding of later objects in order to make
 777         * room for the earlier objects *unless* we need to defragment.
 778         */
 779        retry = 0;
 780        do {
 781                int ret = 0;
 782
  783                /* Unbind any ill-fitting objects, otherwise pin them. */
 784                list_for_each_entry(vma, vmas, exec_list) {
 785                        if (!drm_mm_node_allocated(&vma->node))
 786                                continue;
 787
 788                        if (eb_vma_misplaced(vma))
 789                                ret = i915_vma_unbind(vma);
 790                        else
 791                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 792                        if (ret)
 793                                goto err;
 794                }
 795
 796                /* Bind fresh objects */
 797                list_for_each_entry(vma, vmas, exec_list) {
 798                        if (drm_mm_node_allocated(&vma->node))
 799                                continue;
 800
 801                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 802                        if (ret)
 803                                goto err;
 804                }
 805
 806err:
 807                if (ret != -ENOSPC || retry++)
 808                        return ret;
 809
 810                /* Decrement pin count for bound objects */
 811                list_for_each_entry(vma, vmas, exec_list)
 812                        i915_gem_execbuffer_unreserve_vma(vma);
 813
 814                ret = i915_gem_evict_vm(vm, true);
 815                if (ret)
 816                        return ret;
 817        } while (1);
 818}
 819
 820static int
 821i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 822                                  struct drm_i915_gem_execbuffer2 *args,
 823                                  struct drm_file *file,
 824                                  struct intel_engine_cs *ring,
 825                                  struct eb_vmas *eb,
 826                                  struct drm_i915_gem_exec_object2 *exec,
 827                                  struct intel_context *ctx)
 828{
 829        struct drm_i915_gem_relocation_entry *reloc;
 830        struct i915_address_space *vm;
 831        struct i915_vma *vma;
 832        bool need_relocs;
 833        int *reloc_offset;
 834        int i, total, ret;
 835        unsigned count = args->buffer_count;
 836
 837        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 838
 839        /* We may process another execbuffer during the unlock... */
 840        while (!list_empty(&eb->vmas)) {
 841                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 842                list_del_init(&vma->exec_list);
 843                i915_gem_execbuffer_unreserve_vma(vma);
 844                drm_gem_object_unreference(&vma->obj->base);
 845        }
 846
 847        mutex_unlock(&dev->struct_mutex);
 848
 849        total = 0;
 850        for (i = 0; i < count; i++)
 851                total += exec[i].relocation_count;
 852
 853        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 854        reloc = drm_malloc_ab(total, sizeof(*reloc));
 855        if (reloc == NULL || reloc_offset == NULL) {
 856                drm_free_large(reloc);
 857                drm_free_large(reloc_offset);
 858                mutex_lock(&dev->struct_mutex);
 859                return -ENOMEM;
 860        }
 861
 862        total = 0;
 863        for (i = 0; i < count; i++) {
 864                struct drm_i915_gem_relocation_entry __user *user_relocs;
 865                u64 invalid_offset = (u64)-1;
 866                int j;
 867
 868                user_relocs = to_user_ptr(exec[i].relocs_ptr);
 869
 870                if (copy_from_user(reloc+total, user_relocs,
 871                                   exec[i].relocation_count * sizeof(*reloc))) {
 872                        ret = -EFAULT;
 873                        mutex_lock(&dev->struct_mutex);
 874                        goto err;
 875                }
 876
 877                /* As we do not update the known relocation offsets after
 878                 * relocating (due to the complexities in lock handling),
 879                 * we need to mark them as invalid now so that we force the
 880                 * relocation processing next time. Just in case the target
 881                 * object is evicted and then rebound into its old
 882                 * presumed_offset before the next execbuffer - if that
 883                 * happened we would make the mistake of assuming that the
 884                 * relocations were valid.
 885                 */
 886                for (j = 0; j < exec[i].relocation_count; j++) {
 887                        if (__copy_to_user(&user_relocs[j].presumed_offset,
 888                                           &invalid_offset,
 889                                           sizeof(invalid_offset))) {
 890                                ret = -EFAULT;
 891                                mutex_lock(&dev->struct_mutex);
 892                                goto err;
 893                        }
 894                }
 895
 896                reloc_offset[i] = total;
 897                total += exec[i].relocation_count;
 898        }
 899
 900        ret = i915_mutex_lock_interruptible(dev);
 901        if (ret) {
 902                mutex_lock(&dev->struct_mutex);
 903                goto err;
 904        }
 905
 906        /* reacquire the objects */
 907        eb_reset(eb);
 908        ret = eb_lookup_vmas(eb, exec, args, vm, file);
 909        if (ret)
 910                goto err;
 911
 912        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
 913        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 914        if (ret)
 915                goto err;
 916
 917        list_for_each_entry(vma, &eb->vmas, exec_list) {
 918                int offset = vma->exec_entry - exec;
 919                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
 920                                                            reloc + reloc_offset[offset]);
 921                if (ret)
 922                        goto err;
 923        }
 924
  925        /* Leave the user relocations as they are; this is the painfully slow
  926         * path, and we want to avoid the complication of dropping the lock whilst
 927         * having buffers reserved in the aperture and so causing spurious
 928         * ENOSPC for random operations.
 929         */
 930
 931err:
 932        drm_free_large(reloc);
 933        drm_free_large(reloc_offset);
 934        return ret;
 935}
 936
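/*
 * Prepare every object for this request: synchronise against work on other
 * rings, clflush objects dirtied by the CPU, flush the chipset and GTT
 * writes, and finally invalidate the GPU caches before the batch runs.
 */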
 937static int
 938i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 939                                struct list_head *vmas)
 940{
 941        const unsigned other_rings = ~intel_ring_flag(req->ring);
 942        struct i915_vma *vma;
 943        uint32_t flush_domains = 0;
 944        bool flush_chipset = false;
 945        int ret;
 946
 947        list_for_each_entry(vma, vmas, exec_list) {
 948                struct drm_i915_gem_object *obj = vma->obj;
 949
 950                if (obj->active & other_rings) {
 951                        ret = i915_gem_object_sync(obj, req->ring, &req);
 952                        if (ret)
 953                                return ret;
 954                }
 955
 956                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 957                        flush_chipset |= i915_gem_clflush_object(obj, false);
 958
 959                flush_domains |= obj->base.write_domain;
 960        }
 961
 962        if (flush_chipset)
 963                i915_gem_chipset_flush(req->ring->dev);
 964
 965        if (flush_domains & I915_GEM_DOMAIN_GTT)
 966                wmb();
 967
 968        /* Unconditionally invalidate gpu caches and ensure that we do flush
 969         * any residual writes from the previous batch.
 970         */
 971        return intel_ring_invalidate_all_caches(req);
 972}
 973
 974static bool
 975i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 976{
 977        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
 978                return false;
 979
 980        /* Kernel clipping was a DRI1 misfeature */
 981        if (exec->num_cliprects || exec->cliprects_ptr)
 982                return false;
 983
 984        if (exec->DR4 == 0xffffffff) {
 985                DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
 986                exec->DR4 = 0;
 987        }
 988        if (exec->DR1 || exec->DR4)
 989                return false;
 990
 991        if ((exec->batch_start_offset | exec->batch_len) & 0x7)
 992                return false;
 993
 994        return true;
 995}
 996
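/*
 * Pre-validate the user's exec object array before taking struct_mutex:
 * reject unknown flags, misaligned or non-canonical pinned offsets and
 * relocation counts that could overflow, and pre-fault the relocation
 * arrays so the later atomic copies are likely to succeed.
 */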
 997static int
 998validate_exec_list(struct drm_device *dev,
 999                   struct drm_i915_gem_exec_object2 *exec,
1000                   int count)
1001{
1002        unsigned relocs_total = 0;
1003        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1004        unsigned invalid_flags;
1005        int i;
1006
1007        invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1008        if (USES_FULL_PPGTT(dev))
1009                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1010
1011        for (i = 0; i < count; i++) {
1012                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
1013                int length; /* limited by fault_in_pages_readable() */
1014
1015                if (exec[i].flags & invalid_flags)
1016                        return -EINVAL;
1017
1018                /* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1019                 * any non-page-aligned or non-canonical addresses.
1020                 */
1021                if (exec[i].flags & EXEC_OBJECT_PINNED) {
1022                        if (exec[i].offset !=
1023                            gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1024                                return -EINVAL;
1025
 1026                        /* From the drm_mm perspective the address space is
 1027                         * continuous, so from this point on we always use the
 1028                         * non-canonical form internally.
1029                         */
1030                        exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1031                }
1032
1033                if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1034                        return -EINVAL;
1035
1036                /* First check for malicious input causing overflow in
1037                 * the worst case where we need to allocate the entire
1038                 * relocation tree as a single array.
1039                 */
1040                if (exec[i].relocation_count > relocs_max - relocs_total)
1041                        return -EINVAL;
1042                relocs_total += exec[i].relocation_count;
1043
1044                length = exec[i].relocation_count *
1045                        sizeof(struct drm_i915_gem_relocation_entry);
1046                /*
1047                 * We must check that the entire relocation array is safe
1048                 * to read, but since we may need to update the presumed
1049                 * offsets during execution, check for full write access.
1050                 */
1051                if (!access_ok(VERIFY_WRITE, ptr, length))
1052                        return -EFAULT;
1053
1054                if (likely(!i915.prefault_disable)) {
1055                        if (fault_in_multipages_readable(ptr, length))
1056                                return -EFAULT;
1057                }
1058        }
1059
1060        return 0;
1061}
1062
1063static struct intel_context *
1064i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1065                          struct intel_engine_cs *ring, const u32 ctx_id)
1066{
1067        struct intel_context *ctx = NULL;
1068        struct i915_ctx_hang_stats *hs;
1069
1070        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1071                return ERR_PTR(-EINVAL);
1072
1073        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
1074        if (IS_ERR(ctx))
1075                return ctx;
1076
1077        hs = &ctx->hang_stats;
1078        if (hs->banned) {
1079                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1080                return ERR_PTR(-EIO);
1081        }
1082
1083        if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1084                int ret = intel_lr_context_deferred_alloc(ctx, ring);
1085                if (ret) {
1086                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1087                        return ERR_PTR(ret);
1088                }
1089        }
1090
1091        return ctx;
1092}
1093
1094void
1095i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1096                                   struct drm_i915_gem_request *req)
1097{
1098        struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1099        struct i915_vma *vma;
1100
1101        list_for_each_entry(vma, vmas, exec_list) {
1102                struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1103                struct drm_i915_gem_object *obj = vma->obj;
1104                u32 old_read = obj->base.read_domains;
1105                u32 old_write = obj->base.write_domain;
1106
1107                obj->dirty = 1; /* be paranoid  */
1108                obj->base.write_domain = obj->base.pending_write_domain;
1109                if (obj->base.write_domain == 0)
1110                        obj->base.pending_read_domains |= obj->base.read_domains;
1111                obj->base.read_domains = obj->base.pending_read_domains;
1112
1113                i915_vma_move_to_active(vma, req);
1114                if (obj->base.write_domain) {
1115                        i915_gem_request_assign(&obj->last_write_req, req);
1116
1117                        intel_fb_obj_invalidate(obj, ORIGIN_CS);
1118
1119                        /* update for the implicit flush after a batch */
1120                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1121                }
1122                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1123                        i915_gem_request_assign(&obj->last_fenced_req, req);
1124                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1125                                struct drm_i915_private *dev_priv = to_i915(ring->dev);
1126                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1127                                               &dev_priv->mm.fence_list);
1128                        }
1129                }
1130
1131                trace_i915_gem_object_change_domain(obj, old_read, old_write);
1132        }
1133}
1134
1135void
1136i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1137{
1138        /* Unconditionally force add_request to emit a full flush. */
1139        params->ring->gpu_caches_dirty = true;
1140
1141        /* Add a breadcrumb for the completion of the batch buffer */
1142        __i915_add_request(params->request, params->batch_obj, true);
1143}
1144
1145static int
1146i915_reset_gen7_sol_offsets(struct drm_device *dev,
1147                            struct drm_i915_gem_request *req)
1148{
1149        struct intel_engine_cs *ring = req->ring;
1150        struct drm_i915_private *dev_priv = dev->dev_private;
1151        int ret, i;
1152
1153        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1154                DRM_DEBUG("sol reset is gen7/rcs only\n");
1155                return -EINVAL;
1156        }
1157
1158        ret = intel_ring_begin(req, 4 * 3);
1159        if (ret)
1160                return ret;
1161
1162        for (i = 0; i < 4; i++) {
1163                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1164                intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1165                intel_ring_emit(ring, 0);
1166        }
1167
1168        intel_ring_advance(ring);
1169
1170        return 0;
1171}
1172
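/*
 * Run the batch through the command parser into a shadow buffer taken from
 * the engine's batch pool; the shadow is then added to the eb list so it is
 * reserved and retired like any other execbuf object.
 */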
1173static struct drm_i915_gem_object*
1174i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
1175                          struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1176                          struct eb_vmas *eb,
1177                          struct drm_i915_gem_object *batch_obj,
1178                          u32 batch_start_offset,
1179                          u32 batch_len,
1180                          bool is_master)
1181{
1182        struct drm_i915_gem_object *shadow_batch_obj;
1183        struct i915_vma *vma;
1184        int ret;
1185
1186        shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
1187                                                   PAGE_ALIGN(batch_len));
1188        if (IS_ERR(shadow_batch_obj))
1189                return shadow_batch_obj;
1190
1191        ret = i915_parse_cmds(ring,
1192                              batch_obj,
1193                              shadow_batch_obj,
1194                              batch_start_offset,
1195                              batch_len,
1196                              is_master);
1197        if (ret)
1198                goto err;
1199
1200        ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1201        if (ret)
1202                goto err;
1203
1204        i915_gem_object_unpin_pages(shadow_batch_obj);
1205
1206        memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1207
1208        vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1209        vma->exec_entry = shadow_exec_entry;
1210        vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1211        drm_gem_object_reference(&shadow_batch_obj->base);
1212        list_add_tail(&vma->exec_list, &eb->vmas);
1213
1214        shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1215
1216        return shadow_batch_obj;
1217
1218err:
1219        i915_gem_object_unpin_pages(shadow_batch_obj);
1220        if (ret == -EACCES) /* unhandled chained batch */
1221                return batch_obj;
1222        else
1223                return ERR_PTR(ret);
1224}
1225
1226int
1227i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1228                               struct drm_i915_gem_execbuffer2 *args,
1229                               struct list_head *vmas)
1230{
1231        struct drm_device *dev = params->dev;
1232        struct intel_engine_cs *ring = params->ring;
1233        struct drm_i915_private *dev_priv = dev->dev_private;
1234        u64 exec_start, exec_len;
1235        int instp_mode;
1236        u32 instp_mask;
1237        int ret;
1238
1239        ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1240        if (ret)
1241                return ret;
1242
1243        ret = i915_switch_context(params->request);
1244        if (ret)
1245                return ret;
1246
1247        WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1248             "%s didn't clear reload\n", ring->name);
1249
1250        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1251        instp_mask = I915_EXEC_CONSTANTS_MASK;
1252        switch (instp_mode) {
1253        case I915_EXEC_CONSTANTS_REL_GENERAL:
1254        case I915_EXEC_CONSTANTS_ABSOLUTE:
1255        case I915_EXEC_CONSTANTS_REL_SURFACE:
1256                if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1257                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1258                        return -EINVAL;
1259                }
1260
1261                if (instp_mode != dev_priv->relative_constants_mode) {
1262                        if (INTEL_INFO(dev)->gen < 4) {
1263                                DRM_DEBUG("no rel constants on pre-gen4\n");
1264                                return -EINVAL;
1265                        }
1266
1267                        if (INTEL_INFO(dev)->gen > 5 &&
1268                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1269                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1270                                return -EINVAL;
1271                        }
1272
1273                        /* The HW changed the meaning on this bit on gen6 */
1274                        if (INTEL_INFO(dev)->gen >= 6)
1275                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1276                }
1277                break;
1278        default:
1279                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1280                return -EINVAL;
1281        }
1282
1283        if (ring == &dev_priv->ring[RCS] &&
1284            instp_mode != dev_priv->relative_constants_mode) {
1285                ret = intel_ring_begin(params->request, 4);
1286                if (ret)
1287                        return ret;
1288
1289                intel_ring_emit(ring, MI_NOOP);
1290                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1291                intel_ring_emit_reg(ring, INSTPM);
1292                intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1293                intel_ring_advance(ring);
1294
1295                dev_priv->relative_constants_mode = instp_mode;
1296        }
1297
1298        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1299                ret = i915_reset_gen7_sol_offsets(dev, params->request);
1300                if (ret)
1301                        return ret;
1302        }
1303
1304        exec_len   = args->batch_len;
1305        exec_start = params->batch_obj_vm_offset +
1306                     params->args_batch_start_offset;
1307
1308        if (exec_len == 0)
1309                exec_len = params->batch_obj->base.size;
1310
1311        ret = ring->dispatch_execbuffer(params->request,
1312                                        exec_start, exec_len,
1313                                        params->dispatch_flags);
1314        if (ret)
1315                return ret;
1316
1317        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1318
1319        i915_gem_execbuffer_move_to_active(vmas, params->request);
1320        i915_gem_execbuffer_retire_commands(params);
1321
1322        return 0;
1323}
1324
1325/**
1326 * Find one BSD ring to dispatch the corresponding BSD command.
1327 * The ring index is returned.
1328 */
1329static unsigned int
1330gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
1331{
1332        struct drm_i915_file_private *file_priv = file->driver_priv;
1333
1334        /* Check whether the file_priv has already selected one ring. */
1335        if ((int)file_priv->bsd_ring < 0) {
1336                /* If not, use the ping-pong mechanism to select one. */
1337                mutex_lock(&dev_priv->dev->struct_mutex);
1338                file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
1339                dev_priv->mm.bsd_ring_dispatch_index ^= 1;
1340                mutex_unlock(&dev_priv->dev->struct_mutex);
1341        }
1342
1343        return file_priv->bsd_ring;
1344}
1345
1346static struct drm_i915_gem_object *
1347eb_get_batch(struct eb_vmas *eb)
1348{
1349        struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1350
1351        /*
1352         * SNA is doing fancy tricks with compressing batch buffers, which leads
1353         * to negative relocation deltas. Usually that works out ok since the
1354         * relocate address is still positive, except when the batch is placed
1355         * very low in the GTT. Ensure this doesn't happen.
1356         *
1357         * Note that actual hangs have only been observed on gen7, but for
1358         * paranoia do it everywhere.
1359         */
1360        if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
1361                vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1362
1363        return vma->obj;
1364}
1365
1366#define I915_USER_RINGS (4)
1367
1368static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
1369        [I915_EXEC_DEFAULT]     = RCS,
1370        [I915_EXEC_RENDER]      = RCS,
1371        [I915_EXEC_BLT]         = BCS,
1372        [I915_EXEC_BSD]         = VCS,
1373        [I915_EXEC_VEBOX]       = VECS
1374};
1375
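/*
 * Translate the execbuf ring selection flags into an engine, including the
 * BSD1/BSD2 choice (explicit or ping-pong) on parts with two video rings.
 */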
1376static int
1377eb_select_ring(struct drm_i915_private *dev_priv,
1378               struct drm_file *file,
1379               struct drm_i915_gem_execbuffer2 *args,
1380               struct intel_engine_cs **ring)
1381{
1382        unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1383
1384        if (user_ring_id > I915_USER_RINGS) {
1385                DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1386                return -EINVAL;
1387        }
1388
1389        if ((user_ring_id != I915_EXEC_BSD) &&
1390            ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1391                DRM_DEBUG("execbuf with non bsd ring but with invalid "
1392                          "bsd dispatch flags: %d\n", (int)(args->flags));
1393                return -EINVAL;
1394        }
1395
1396        if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1397                unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1398
1399                if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1400                        bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
1401                } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1402                           bsd_idx <= I915_EXEC_BSD_RING2) {
1403                        bsd_idx >>= I915_EXEC_BSD_SHIFT;
1404                        bsd_idx--;
1405                } else {
1406                        DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1407                                  bsd_idx);
1408                        return -EINVAL;
1409                }
1410
1411                *ring = &dev_priv->ring[_VCS(bsd_idx)];
1412        } else {
1413                *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
1414        }
1415
1416        if (!intel_ring_initialized(*ring)) {
1417                DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1418                return -EINVAL;
1419        }
1420
1421        return 0;
1422}
1423
1424static int
1425i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1426                       struct drm_file *file,
1427                       struct drm_i915_gem_execbuffer2 *args,
1428                       struct drm_i915_gem_exec_object2 *exec)
1429{
1430        struct drm_i915_private *dev_priv = dev->dev_private;
1431        struct drm_i915_gem_request *req = NULL;
1432        struct eb_vmas *eb;
1433        struct drm_i915_gem_object *batch_obj;
1434        struct drm_i915_gem_exec_object2 shadow_exec_entry;
1435        struct intel_engine_cs *ring;
1436        struct intel_context *ctx;
1437        struct i915_address_space *vm;
1438        struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1439        struct i915_execbuffer_params *params = &params_master;
1440        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1441        u32 dispatch_flags;
1442        int ret;
1443        bool need_relocs;
1444
1445        if (!i915_gem_check_execbuffer(args))
1446                return -EINVAL;
1447
1448        ret = validate_exec_list(dev, exec, args->buffer_count);
1449        if (ret)
1450                return ret;
1451
1452        dispatch_flags = 0;
1453        if (args->flags & I915_EXEC_SECURE) {
1454                if (!file->is_master || !capable(CAP_SYS_ADMIN))
1455                        return -EPERM;
1456
1457                dispatch_flags |= I915_DISPATCH_SECURE;
1458        }
1459        if (args->flags & I915_EXEC_IS_PINNED)
1460                dispatch_flags |= I915_DISPATCH_PINNED;
1461
1462        ret = eb_select_ring(dev_priv, file, args, &ring);
1463        if (ret)
1464                return ret;
1465
1466        if (args->buffer_count < 1) {
1467                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1468                return -EINVAL;
1469        }
1470
1471        if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1472                if (!HAS_RESOURCE_STREAMER(dev)) {
1473                        DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1474                        return -EINVAL;
1475                }
1476                if (ring->id != RCS) {
1477                        DRM_DEBUG("RS is not available on %s\n",
1478                                 ring->name);
1479                        return -EINVAL;
1480                }
1481
1482                dispatch_flags |= I915_DISPATCH_RS;
1483        }
1484
1485        intel_runtime_pm_get(dev_priv);
1486
1487        ret = i915_mutex_lock_interruptible(dev);
1488        if (ret)
1489                goto pre_mutex_err;
1490
1491        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1492        if (IS_ERR(ctx)) {
1493                mutex_unlock(&dev->struct_mutex);
1494                ret = PTR_ERR(ctx);
1495                goto pre_mutex_err;
1496        }
1497
1498        i915_gem_context_reference(ctx);
1499
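            /* Run in the context's ppgtt when it has one, else in the global GTT. */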
1500        if (ctx->ppgtt)
1501                vm = &ctx->ppgtt->base;
1502        else
1503                vm = &dev_priv->gtt.base;
1504
1505        memset(&params_master, 0x00, sizeof(params_master));
1506
1507        eb = eb_create(args);
1508        if (eb == NULL) {
1509                i915_gem_context_unreference(ctx);
1510                mutex_unlock(&dev->struct_mutex);
1511                ret = -ENOMEM;
1512                goto pre_mutex_err;
1513        }
1514
1515        /* Look up object handles */
1516        ret = eb_lookup_vmas(eb, exec, args, vm, file);
1517        if (ret)
1518                goto err;
1519
1520        /* take note of the batch buffer before we might reorder the lists */
1521        batch_obj = eb_get_batch(eb);
1522
1523        /* Move the objects en-masse into the GTT, evicting if necessary. */
1524        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1525        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
1526        if (ret)
1527                goto err;
1528
1529        /* The objects are in their final locations, apply the relocations. */
1530        if (need_relocs)
1531                ret = i915_gem_execbuffer_relocate(eb);
1532        if (ret) {
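                    /*
                     * -EFAULT means a relocation touched unfaulted user memory;
                     * fall back to the slow path, which drops the mutex, copies
                     * the relocations in and retries.
                     */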
1533                if (ret == -EFAULT) {
1534                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1535                                                                eb, exec, ctx);
1536                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1537                }
1538                if (ret)
1539                        goto err;
1540        }
1541
1542        /* Reject self-modifying batches; the batch's read domain is
             * marked as COMMAND further below. */
1543        if (batch_obj->base.pending_write_domain) {
1544                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1545                ret = -EINVAL;
1546                goto err;
1547        }
1548
1549        params->args_batch_start_offset = args->batch_start_offset;
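            /*
             * Where required, run the batch through the command parser, which
             * may hand back a validated shadow copy of the batch to execute
             * instead.
             */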
1550        if (i915_needs_cmd_parser(ring) && args->batch_len) {
1551                struct drm_i915_gem_object *parsed_batch_obj;
1552
1553                parsed_batch_obj = i915_gem_execbuffer_parse(ring,
1554                                                      &shadow_exec_entry,
1555                                                      eb,
1556                                                      batch_obj,
1557                                                      args->batch_start_offset,
1558                                                      args->batch_len,
1559                                                      file->is_master);
1560                if (IS_ERR(parsed_batch_obj)) {
1561                        ret = PTR_ERR(parsed_batch_obj);
1562                        goto err;
1563                }
1564
1565                /*
1566                 * parsed_batch_obj == batch_obj means batch not fully parsed:
1567                 * Accept, but don't promote to secure.
1568                 */
1569
1570                if (parsed_batch_obj != batch_obj) {
1571                        /*
1572                         * Batch parsed and accepted:
1573                         *
1574                         * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1575                         * bit from MI_BATCH_BUFFER_START commands issued in
1576                         * the dispatch_execbuffer implementations. We
1577                         * specifically don't want that set on batches the
1578                         * command parser has accepted.
1579                         */
1580                        dispatch_flags |= I915_DISPATCH_SECURE;
1581                        params->args_batch_start_offset = 0;
1582                        batch_obj = parsed_batch_obj;
1583                }
1584        }
1585
1586        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1587
1588        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1589         * batch" bit. Hence we need to pin secure batches into the global gtt.
1590         * hsw should have this fixed, but bdw mucks it up again. */
1591        if (dispatch_flags & I915_DISPATCH_SECURE) {
1592                /*
1593                 * So on first glance it looks freaky that we pin the batch here
1594                 * outside of the reservation loop. But:
1595                 * - The batch is already pinned into the relevant ppgtt, so we
1596                 *   already have the backing storage fully allocated.
1597                 * - No other BO uses the global gtt (well contexts, but meh),
1598                 *   so we don't really have issues with multiple objects not
1599                 *   fitting due to fragmentation.
1600                 * So this is actually safe.
1601                 */
1602                ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1603                if (ret)
1604                        goto err;
1605
1606                params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1607        } else
1608                params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1609
1610        /* Allocate a request for this batch buffer nice and early. */
1611        req = i915_gem_request_alloc(ring, ctx);
1612        if (IS_ERR(req)) {
1613                ret = PTR_ERR(req);
1614                goto err_batch_unpin;
1615        }
1616
1617        ret = i915_gem_request_add_to_client(req, file);
1618        if (ret)
1619                goto err_batch_unpin;
1620
1621        /*
1622         * Save assorted stuff away to pass through to *_submission().
1623         * NB: This data should be 'persistent' and not local as it will be
1624         * kept around beyond the duration of the IOCTL once the GPU
1625         * scheduler arrives.
1626         */
1627        params->dev                     = dev;
1628        params->file                    = file;
1629        params->ring                    = ring;
1630        params->dispatch_flags          = dispatch_flags;
1631        params->batch_obj               = batch_obj;
1632        params->ctx                     = ctx;
1633        params->request                 = req;
1634
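            /* Hand off to the backend submission path (legacy ring or execlists). */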
1635        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1636
1637err_batch_unpin:
1638        /*
1639         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1640         * batch vma for correctness. For less ugly and less fragility this
1641         * needs to be adjusted to also track the ggtt batch vma properly as
1642         * active.
1643         */
1644        if (dispatch_flags & I915_DISPATCH_SECURE)
1645                i915_gem_object_ggtt_unpin(batch_obj);
1646
1647err:
1648        /* the request owns the ref now */
1649        i915_gem_context_unreference(ctx);
1650        eb_destroy(eb);
1651
1652        /*
1653         * If the request was created but not successfully submitted then it
1654         * must be freed again. If it was submitted then it is being tracked
1655         * on the active request list and no clean up is required here.
1656         */
1657        if (ret && !IS_ERR_OR_NULL(req))
1658                i915_gem_request_cancel(req);
1659
1660        mutex_unlock(&dev->struct_mutex);
1661
1662pre_mutex_err:
1663        /* intel_gpu_busy should also get a reference, so it is only released
1664         * when the device is really idle. */
1665        intel_runtime_pm_put(dev_priv);
1666        return ret;
1667}
1668
1669/*
1670 * Legacy execbuffer just creates an exec2 list from the original exec object
1671 * list array and passes it to the real function.
1672 */
1673int
1674i915_gem_execbuffer(struct drm_device *dev, void *data,
1675                    struct drm_file *file)
1676{
1677        struct drm_i915_gem_execbuffer *args = data;
1678        struct drm_i915_gem_execbuffer2 exec2;
1679        struct drm_i915_gem_exec_object *exec_list = NULL;
1680        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1681        int ret, i;
1682
1683        if (args->buffer_count < 1) {
1684                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1685                return -EINVAL;
1686        }
1687
1688        /* Copy in the exec list from userland */
1689        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1690        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1691        if (exec_list == NULL || exec2_list == NULL) {
1692                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1693                          args->buffer_count);
1694                drm_free_large(exec_list);
1695                drm_free_large(exec2_list);
1696                return -ENOMEM;
1697        }
1698        ret = copy_from_user(exec_list,
1699                             to_user_ptr(args->buffers_ptr),
1700                             sizeof(*exec_list) * args->buffer_count);
1701        if (ret != 0) {
1702                DRM_DEBUG("copy %d exec entries failed %d\n",
1703                          args->buffer_count, ret);
1704                drm_free_large(exec_list);
1705                drm_free_large(exec2_list);
1706                return -EFAULT;
1707        }
1708
1709        for (i = 0; i < args->buffer_count; i++) {
1710                exec2_list[i].handle = exec_list[i].handle;
1711                exec2_list[i].relocation_count = exec_list[i].relocation_count;
1712                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1713                exec2_list[i].alignment = exec_list[i].alignment;
1714                exec2_list[i].offset = exec_list[i].offset;
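                    /*
                     * The legacy ABI carries no per-object flags; pre-gen4
                     * hardware accesses tiled buffers through fence registers,
                     * so conservatively request a fence for every object.
                     */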
1715                if (INTEL_INFO(dev)->gen < 4)
1716                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1717                else
1718                        exec2_list[i].flags = 0;
1719        }
1720
1721        exec2.buffers_ptr = args->buffers_ptr;
1722        exec2.buffer_count = args->buffer_count;
1723        exec2.batch_start_offset = args->batch_start_offset;
1724        exec2.batch_len = args->batch_len;
1725        exec2.DR1 = args->DR1;
1726        exec2.DR4 = args->DR4;
1727        exec2.num_cliprects = args->num_cliprects;
1728        exec2.cliprects_ptr = args->cliprects_ptr;
1729        exec2.flags = I915_EXEC_RENDER;
1730        i915_execbuffer2_set_context_id(exec2, 0);
1731
1732        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1733        if (!ret) {
1734                struct drm_i915_gem_exec_object __user *user_exec_list =
1735                        to_user_ptr(args->buffers_ptr);
1736
1737                /* Copy the new buffer offsets back to the user's exec list. */
1738                for (i = 0; i < args->buffer_count; i++) {
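                            /*
                             * Report offsets back in canonical form (bit 47
                             * sign-extended), matching how gen8+ hardware
                             * interprets 48-bit addresses.
                             */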
1739                        exec2_list[i].offset =
1740                                gen8_canonical_addr(exec2_list[i].offset);
1741                        ret = __copy_to_user(&user_exec_list[i].offset,
1742                                             &exec2_list[i].offset,
1743                                             sizeof(user_exec_list[i].offset));
1744                        if (ret) {
1745                                ret = -EFAULT;
1746                                DRM_DEBUG("failed to copy %d exec entries "
1747                                          "back to user (%d)\n",
1748                                          args->buffer_count, ret);
1749                                break;
1750                        }
1751                }
1752        }
1753
1754        drm_free_large(exec_list);
1755        drm_free_large(exec2_list);
1756        return ret;
1757}
1758
1759int
1760i915_gem_execbuffer2(struct drm_device *dev, void *data,
1761                     struct drm_file *file)
1762{
1763        struct drm_i915_gem_execbuffer2 *args = data;
1764        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1765        int ret;
1766
1767        if (args->buffer_count < 1 ||
1768            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1769                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1770                return -EINVAL;
1771        }
1772
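            /* rsvd2 is reserved for future use and must currently be zero. */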
1773        if (args->rsvd2 != 0) {
1774                DRM_DEBUG("dirty rsvd2 field\n");
1775                return -EINVAL;
1776        }
1777
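            /*
             * Try a cheap kmalloc first; for large buffer counts fall back to
             * drm_malloc_ab(), which may use vmalloc.
             */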
1778        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1779                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1780        if (exec2_list == NULL)
1781                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1782                                           args->buffer_count);
1783        if (exec2_list == NULL) {
1784                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1785                          args->buffer_count);
1786                return -ENOMEM;
1787        }
1788        ret = copy_from_user(exec2_list,
1789                             to_user_ptr(args->buffers_ptr),
1790                             sizeof(*exec2_list) * args->buffer_count);
1791        if (ret != 0) {
1792                DRM_DEBUG("copy %d exec entries failed %d\n",
1793                          args->buffer_count, ret);
1794                drm_free_large(exec2_list);
1795                return -EFAULT;
1796        }
1797
1798        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1799        if (!ret) {
1800                /* Copy the new buffer offsets back to the user's exec list. */
1801                struct drm_i915_gem_exec_object2 __user *user_exec_list =
1802                                   to_user_ptr(args->buffers_ptr);
1803                int i;
1804
1805                for (i = 0; i < args->buffer_count; i++) {
1806                        exec2_list[i].offset =
1807                                gen8_canonical_addr(exec2_list[i].offset);
1808                        ret = __copy_to_user(&user_exec_list[i].offset,
1809                                             &exec2_list[i].offset,
1810                                             sizeof(user_exec_list[i].offset));
1811                        if (ret) {
1812                                ret = -EFAULT;
1813                                DRM_DEBUG("failed to copy %d exec entries "
1814                                          "back to user\n",
1815                                          args->buffer_count);
1816                                break;
1817                        }
1818                }
1819        }
1820
1821        drm_free_large(exec2_list);
1822        return ret;
1823}
1824