linux/drivers/gpu/drm/i915/i915_gem_execbuffer.c
   1/*
   2 * Copyright © 2008,2010 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Chris Wilson <chris@chris-wilson.co.uk>
  26 *
  27 */
  28
  29#include <drm/drmP.h>
  30#include <drm/i915_drm.h>
  31#include "i915_drv.h"
  32#include "i915_trace.h"
  33#include "intel_drv.h"
  34#include <linux/dma_remapping.h>
  35
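     /*
      * The EXEC_OBJECT_* bits in drm_i915_gem_exec_object2.flags belong to
      * userspace; the __EXEC_OBJECT_* bits below are internal book-keeping
      * kept in the otherwise unused high bits of that field while an
      * execbuffer is being processed.  BATCH_OFFSET_BIAS keeps the batch
      * out of the low 256KiB of the address space; see eb_get_batch().
      */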
  36#define  __EXEC_OBJECT_HAS_PIN (1<<31)
  37#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
  38#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
  39
  40#define BATCH_OFFSET_BIAS (256*1024)
  41
  42struct eb_vmas {
  43        struct list_head vmas;
  44        int and;
  45        union {
  46                struct i915_vma *lut[0];
  47                struct hlist_head buckets[0];
  48        };
  49};
  50
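     /*
      * eb_vmas collects every vma referenced by the current execbuffer.
      * With I915_EXEC_HANDLE_LUT userspace refers to relocation targets by
      * their index in the exec list, so a flat lookup table indexed by that
      * value suffices ('and' is negative and holds -buffer_count for bounds
      * checking).  Otherwise, or if the table allocation fails, handles are
      * hashed into a small power-of-two bucket array ('and' is the mask).
      */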
  51static struct eb_vmas *
  52eb_create(struct drm_i915_gem_execbuffer2 *args)
  53{
  54        struct eb_vmas *eb = NULL;
  55
  56        if (args->flags & I915_EXEC_HANDLE_LUT) {
  57                unsigned size = args->buffer_count;
  58                size *= sizeof(struct i915_vma *);
  59                size += sizeof(struct eb_vmas);
  60                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  61        }
  62
  63        if (eb == NULL) {
  64                unsigned size = args->buffer_count;
  65                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  66                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
  67                while (count > 2*size)
  68                        count >>= 1;
  69                eb = kzalloc(count*sizeof(struct hlist_head) +
  70                             sizeof(struct eb_vmas),
  71                             GFP_TEMPORARY);
  72                if (eb == NULL)
  73                        return eb;
  74
  75                eb->and = count - 1;
  76        } else
  77                eb->and = -args->buffer_count;
  78
  79        INIT_LIST_HEAD(&eb->vmas);
  80        return eb;
  81}
  82
  83static void
  84eb_reset(struct eb_vmas *eb)
  85{
  86        if (eb->and >= 0)
  87                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  88}
  89
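     /*
      * Resolve each exec object handle into a vma in the target address
      * space.  Pass one runs under the file's table_lock: take a reference
      * on every object and reject handles that appear twice.  Pass two may
      * allocate, so it runs unlocked: look up or create the vma for each
      * object (the batch of a secure dispatch is bound in the global GTT
      * instead), move it onto eb->vmas and record it so that eb_get_vma()
      * can find it while processing relocations.
      */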
  90static int
  91eb_lookup_vmas(struct eb_vmas *eb,
  92               struct drm_i915_gem_exec_object2 *exec,
  93               const struct drm_i915_gem_execbuffer2 *args,
  94               struct i915_address_space *vm,
  95               struct drm_file *file)
  96{
  97        struct drm_i915_private *dev_priv = vm->dev->dev_private;
  98        struct drm_i915_gem_object *obj;
  99        struct list_head objects;
 100        int i, ret;
 101
 102        INIT_LIST_HEAD(&objects);
 103        spin_lock(&file->table_lock);
  104        /* Grab a reference to the object and release the lock so we can look up
  105         * or create the VMA without using GFP_ATOMIC. */
 106        for (i = 0; i < args->buffer_count; i++) {
 107                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 108                if (obj == NULL) {
 109                        spin_unlock(&file->table_lock);
 110                        DRM_DEBUG("Invalid object handle %d at index %d\n",
 111                                   exec[i].handle, i);
 112                        ret = -ENOENT;
 113                        goto err;
 114                }
 115
 116                if (!list_empty(&obj->obj_exec_link)) {
 117                        spin_unlock(&file->table_lock);
 118                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 119                                   obj, exec[i].handle, i);
 120                        ret = -EINVAL;
 121                        goto err;
 122                }
 123
 124                drm_gem_object_reference(&obj->base);
 125                list_add_tail(&obj->obj_exec_link, &objects);
 126        }
 127        spin_unlock(&file->table_lock);
 128
 129        i = 0;
 130        while (!list_empty(&objects)) {
 131                struct i915_vma *vma;
 132                struct i915_address_space *bind_vm = vm;
 133
 134                if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
 135                    USES_FULL_PPGTT(vm->dev)) {
 136                        ret = -EINVAL;
 137                        goto err;
 138                }
 139
  140                /* A secure dispatch requires the batch buffer (the last
  141                 * object in the exec list) to be bound through the GGTT,
  142                 * so use the GGTT VM for it. */
 143                if (((args->flags & I915_EXEC_SECURE) &&
 144                    (i == (args->buffer_count - 1))))
 145                        bind_vm = &dev_priv->gtt.base;
 146
 147                obj = list_first_entry(&objects,
 148                                       struct drm_i915_gem_object,
 149                                       obj_exec_link);
 150
 151                /*
 152                 * NOTE: We can leak any vmas created here when something fails
 153                 * later on. But that's no issue since vma_unbind can deal with
 154                 * vmas which are not actually bound. And since only
 155                 * lookup_or_create exists as an interface to get at the vma
 156                 * from the (obj, vm) we don't run the risk of creating
 157                 * duplicated vmas for the same vm.
 158                 */
 159                vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
 160                if (IS_ERR(vma)) {
 161                        DRM_DEBUG("Failed to lookup VMA\n");
 162                        ret = PTR_ERR(vma);
 163                        goto err;
 164                }
 165
 166                /* Transfer ownership from the objects list to the vmas list. */
 167                list_add_tail(&vma->exec_list, &eb->vmas);
 168                list_del_init(&obj->obj_exec_link);
 169
 170                vma->exec_entry = &exec[i];
 171                if (eb->and < 0) {
 172                        eb->lut[i] = vma;
 173                } else {
 174                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
 175                        vma->exec_handle = handle;
 176                        hlist_add_head(&vma->exec_node,
 177                                       &eb->buckets[handle & eb->and]);
 178                }
 179                ++i;
 180        }
 181
 182        return 0;
 183
 184
 185err:
 186        while (!list_empty(&objects)) {
 187                obj = list_first_entry(&objects,
 188                                       struct drm_i915_gem_object,
 189                                       obj_exec_link);
 190                list_del_init(&obj->obj_exec_link);
 191                drm_gem_object_unreference(&obj->base);
 192        }
 193        /*
  194         * Objects already transferred to the vmas list will be unreferenced by
 195         * eb_destroy.
 196         */
 197
 198        return ret;
 199}
 200
 201static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 202{
 203        if (eb->and < 0) {
 204                if (handle >= -eb->and)
 205                        return NULL;
 206                return eb->lut[handle];
 207        } else {
 208                struct hlist_head *head;
 209                struct hlist_node *node;
 210
 211                head = &eb->buckets[handle & eb->and];
 212                hlist_for_each(node, head) {
 213                        struct i915_vma *vma;
 214
 215                        vma = hlist_entry(node, struct i915_vma, exec_node);
 216                        if (vma->exec_handle == handle)
 217                                return vma;
 218                }
 219                return NULL;
 220        }
 221}
 222
 223static void
 224i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 225{
 226        struct drm_i915_gem_exec_object2 *entry;
 227        struct drm_i915_gem_object *obj = vma->obj;
 228
 229        if (!drm_mm_node_allocated(&vma->node))
 230                return;
 231
 232        entry = vma->exec_entry;
 233
 234        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 235                i915_gem_object_unpin_fence(obj);
 236
 237        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 238                vma->pin_count--;
 239
 240        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 241}
 242
 243static void eb_destroy(struct eb_vmas *eb)
 244{
 245        while (!list_empty(&eb->vmas)) {
 246                struct i915_vma *vma;
 247
 248                vma = list_first_entry(&eb->vmas,
 249                                       struct i915_vma,
 250                                       exec_list);
 251                list_del_init(&vma->exec_list);
 252                i915_gem_execbuffer_unreserve_vma(vma);
 253                drm_gem_object_unreference(&vma->obj->base);
 254        }
 255        kfree(eb);
 256}
 257
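     /*
      * Choose how to patch a relocation: via the CPU when the platform has
      * an LLC, when the object is already being written through the CPU
      * domain, when it cannot be mapped into the mappable aperture, or when
      * it is not uncached; otherwise through a GTT mapping.
      */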
 258static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 259{
 260        return (HAS_LLC(obj->base.dev) ||
 261                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 262                !obj->map_and_fenceable ||
 263                obj->cache_level != I915_CACHE_NONE);
 264}
 265
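     /*
      * Apply a relocation through a kmap of the object's backing page after
      * moving it to the CPU domain.  On gen8+ addresses are 64 bits wide, so
      * the upper dword is written as well (as zero), remapping the next page
      * if the entry straddles a page boundary.
      */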
 266static int
 267relocate_entry_cpu(struct drm_i915_gem_object *obj,
 268                   struct drm_i915_gem_relocation_entry *reloc)
 269{
 270        struct drm_device *dev = obj->base.dev;
 271        uint32_t page_offset = offset_in_page(reloc->offset);
 272        char *vaddr;
 273        int ret;
 274
 275        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 276        if (ret)
 277                return ret;
 278
 279        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 280                                reloc->offset >> PAGE_SHIFT));
 281        *(uint32_t *)(vaddr + page_offset) = reloc->delta;
 282
 283        if (INTEL_INFO(dev)->gen >= 8) {
 284                page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 285
 286                if (page_offset == 0) {
 287                        kunmap_atomic(vaddr);
 288                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 289                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 290                }
 291
 292                *(uint32_t *)(vaddr + page_offset) = 0;
 293        }
 294
 295        kunmap_atomic(vaddr);
 296
 297        return 0;
 298}
 299
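     /*
      * Apply a relocation through an atomic write-combining mapping of the
      * mappable aperture at the object's GGTT offset, after moving the
      * object to the GTT domain and dropping any fence.  As above, gen8+
      * also writes the upper dword, remapping across a page boundary when
      * necessary.
      */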
 300static int
 301relocate_entry_gtt(struct drm_i915_gem_object *obj,
 302                   struct drm_i915_gem_relocation_entry *reloc)
 303{
 304        struct drm_device *dev = obj->base.dev;
 305        struct drm_i915_private *dev_priv = dev->dev_private;
 306        uint32_t __iomem *reloc_entry;
 307        void __iomem *reloc_page;
 308        int ret;
 309
 310        ret = i915_gem_object_set_to_gtt_domain(obj, true);
 311        if (ret)
 312                return ret;
 313
 314        ret = i915_gem_object_put_fence(obj);
 315        if (ret)
 316                return ret;
 317
 318        /* Map the page containing the relocation we're going to perform.  */
 319        reloc->offset += i915_gem_obj_ggtt_offset(obj);
 320        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 321                        reloc->offset & PAGE_MASK);
 322        reloc_entry = (uint32_t __iomem *)
 323                (reloc_page + offset_in_page(reloc->offset));
 324        iowrite32(reloc->delta, reloc_entry);
 325
 326        if (INTEL_INFO(dev)->gen >= 8) {
 327                reloc_entry += 1;
 328
 329                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
 330                        io_mapping_unmap_atomic(reloc_page);
 331                        reloc_page = io_mapping_map_atomic_wc(
 332                                        dev_priv->gtt.mappable,
 333                                        reloc->offset + sizeof(uint32_t));
 334                        reloc_entry = reloc_page;
 335                }
 336
 337                iowrite32(0, reloc_entry);
 338        }
 339
 340        io_mapping_unmap_atomic(reloc_page);
 341
 342        return 0;
 343}
 344
 345static int
 346i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 347                                   struct eb_vmas *eb,
 348                                   struct drm_i915_gem_relocation_entry *reloc)
 349{
 350        struct drm_device *dev = obj->base.dev;
 351        struct drm_gem_object *target_obj;
 352        struct drm_i915_gem_object *target_i915_obj;
 353        struct i915_vma *target_vma;
 354        uint32_t target_offset;
 355        int ret;
 356
  357        /* we already hold a reference to all valid objects */
 358        target_vma = eb_get_vma(eb, reloc->target_handle);
 359        if (unlikely(target_vma == NULL))
 360                return -ENOENT;
 361        target_i915_obj = target_vma->obj;
 362        target_obj = &target_vma->obj->base;
 363
 364        target_offset = target_vma->node.start;
 365
 366        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 367         * pipe_control writes because the gpu doesn't properly redirect them
  368         * through the ppgtt for non-secure batchbuffers. */
 369        if (unlikely(IS_GEN6(dev) &&
 370            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 371            !target_i915_obj->has_global_gtt_mapping)) {
 372                struct i915_vma *vma =
 373                        list_first_entry(&target_i915_obj->vma_list,
 374                                         typeof(*vma), vma_link);
 375                vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
 376        }
 377
 378        /* Validate that the target is in a valid r/w GPU domain */
 379        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 380                DRM_DEBUG("reloc with multiple write domains: "
 381                          "obj %p target %d offset %d "
 382                          "read %08x write %08x",
 383                          obj, reloc->target_handle,
 384                          (int) reloc->offset,
 385                          reloc->read_domains,
 386                          reloc->write_domain);
 387                return -EINVAL;
 388        }
 389        if (unlikely((reloc->write_domain | reloc->read_domains)
 390                     & ~I915_GEM_GPU_DOMAINS)) {
 391                DRM_DEBUG("reloc with read/write non-GPU domains: "
 392                          "obj %p target %d offset %d "
 393                          "read %08x write %08x",
 394                          obj, reloc->target_handle,
 395                          (int) reloc->offset,
 396                          reloc->read_domains,
 397                          reloc->write_domain);
 398                return -EINVAL;
 399        }
 400
 401        target_obj->pending_read_domains |= reloc->read_domains;
 402        target_obj->pending_write_domain |= reloc->write_domain;
 403
 404        /* If the relocation already has the right value in it, no
 405         * more work needs to be done.
 406         */
 407        if (target_offset == reloc->presumed_offset)
 408                return 0;
 409
 410        /* Check that the relocation address is valid... */
 411        if (unlikely(reloc->offset >
 412                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 413                DRM_DEBUG("Relocation beyond object bounds: "
 414                          "obj %p target %d offset %d size %d.\n",
 415                          obj, reloc->target_handle,
 416                          (int) reloc->offset,
 417                          (int) obj->base.size);
 418                return -EINVAL;
 419        }
 420        if (unlikely(reloc->offset & 3)) {
 421                DRM_DEBUG("Relocation not 4-byte aligned: "
 422                          "obj %p target %d offset %d.\n",
 423                          obj, reloc->target_handle,
 424                          (int) reloc->offset);
 425                return -EINVAL;
 426        }
 427
 428        /* We can't wait for rendering with pagefaults disabled */
 429        if (obj->active && in_atomic())
 430                return -EFAULT;
 431
 432        reloc->delta += target_offset;
 433        if (use_cpu_reloc(obj))
 434                ret = relocate_entry_cpu(obj, reloc);
 435        else
 436                ret = relocate_entry_gtt(obj, reloc);
 437
 438        if (ret)
 439                return ret;
 440
 441        /* and update the user's relocation entry */
 442        reloc->presumed_offset = target_offset;
 443
 444        return 0;
 445}
 446
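     /*
      * Fast-path relocations for one buffer: the user's relocation array is
      * copied in chunks onto the stack with __copy_from_user_inatomic() (the
      * caller has pagefaults disabled), each entry is applied, and a changed
      * presumed_offset is written straight back to userspace.  A fault on
      * either copy returns -EFAULT, which sends the whole execbuffer down
      * the slow path.
      */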
 447static int
 448i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 449                                 struct eb_vmas *eb)
 450{
 451#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 452        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 453        struct drm_i915_gem_relocation_entry __user *user_relocs;
 454        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 455        int remain, ret;
 456
 457        user_relocs = to_user_ptr(entry->relocs_ptr);
 458
 459        remain = entry->relocation_count;
 460        while (remain) {
 461                struct drm_i915_gem_relocation_entry *r = stack_reloc;
 462                int count = remain;
 463                if (count > ARRAY_SIZE(stack_reloc))
 464                        count = ARRAY_SIZE(stack_reloc);
 465                remain -= count;
 466
 467                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
 468                        return -EFAULT;
 469
 470                do {
 471                        u64 offset = r->presumed_offset;
 472
 473                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
 474                        if (ret)
 475                                return ret;
 476
 477                        if (r->presumed_offset != offset &&
 478                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
 479                                                    &r->presumed_offset,
 480                                                    sizeof(r->presumed_offset))) {
 481                                return -EFAULT;
 482                        }
 483
 484                        user_relocs++;
 485                        r++;
 486                } while (--count);
 487        }
 488
 489        return 0;
 490#undef N_RELOC
 491}
 492
 493static int
 494i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 495                                      struct eb_vmas *eb,
 496                                      struct drm_i915_gem_relocation_entry *relocs)
 497{
 498        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 499        int i, ret;
 500
 501        for (i = 0; i < entry->relocation_count; i++) {
 502                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
 503                if (ret)
 504                        return ret;
 505        }
 506
 507        return 0;
 508}
 509
 510static int
 511i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 512{
 513        struct i915_vma *vma;
 514        int ret = 0;
 515
 516        /* This is the fast path and we cannot handle a pagefault whilst
 517         * holding the struct mutex lest the user pass in the relocations
  518         * contained within a mmapped bo. In such a case the page fault
  519         * handler would call i915_gem_fault() and we would try to acquire
  520         * the struct mutex again. Obviously this is bad and so lockdep
  521         * complains vehemently.
 522         */
 523        pagefault_disable();
 524        list_for_each_entry(vma, &eb->vmas, exec_list) {
 525                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 526                if (ret)
 527                        break;
 528        }
 529        pagefault_enable();
 530
 531        return ret;
 532}
 533
 534static int
 535need_reloc_mappable(struct i915_vma *vma)
 536{
 537        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 538        return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
 539                i915_is_ggtt(vma->vm);
 540}
 541
 542static int
 543i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 544                                struct intel_ring_buffer *ring,
 545                                bool *need_reloc)
 546{
 547        struct drm_i915_gem_object *obj = vma->obj;
 548        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 549        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 550        bool need_fence;
 551        uint64_t flags;
 552        int ret;
 553
 554        flags = 0;
 555
 556        need_fence =
 557                has_fenced_gpu_access &&
 558                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 559                obj->tiling_mode != I915_TILING_NONE;
 560        if (need_fence || need_reloc_mappable(vma))
 561                flags |= PIN_MAPPABLE;
 562
 563        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 564                flags |= PIN_GLOBAL;
 565        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
 566                flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 567
 568        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 569        if (ret)
 570                return ret;
 571
 572        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 573
 574        if (has_fenced_gpu_access) {
 575                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 576                        ret = i915_gem_object_get_fence(obj);
 577                        if (ret)
 578                                return ret;
 579
 580                        if (i915_gem_object_pin_fence(obj))
 581                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 582
 583                        obj->pending_fenced_gpu_access = true;
 584                }
 585        }
 586
 587        if (entry->offset != vma->node.start) {
 588                entry->offset = vma->node.start;
 589                *need_reloc = true;
 590        }
 591
 592        if (entry->flags & EXEC_OBJECT_WRITE) {
 593                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
 594                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 595        }
 596
 597        return 0;
 598}
 599
 600static bool
 601eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
 602{
 603        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 604        struct drm_i915_gem_object *obj = vma->obj;
 605        bool need_fence, need_mappable;
 606
 607        need_fence =
 608                has_fenced_gpu_access &&
 609                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 610                obj->tiling_mode != I915_TILING_NONE;
 611        need_mappable = need_fence || need_reloc_mappable(vma);
 612
 613        WARN_ON((need_mappable || need_fence) &&
 614               !i915_is_ggtt(vma->vm));
 615
 616        if (entry->alignment &&
 617            vma->node.start & (entry->alignment - 1))
 618                return true;
 619
 620        if (need_mappable && !obj->map_and_fenceable)
 621                return true;
 622
 623        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
 624            vma->node.start < BATCH_OFFSET_BIAS)
 625                return true;
 626
 627        return false;
 628}
 629
 630static int
 631i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 632                            struct list_head *vmas,
 633                            bool *need_relocs)
 634{
 635        struct drm_i915_gem_object *obj;
 636        struct i915_vma *vma;
 637        struct i915_address_space *vm;
 638        struct list_head ordered_vmas;
 639        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 640        int retry;
 641
 642        if (list_empty(vmas))
 643                return 0;
 644
 645        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 646
 647        INIT_LIST_HEAD(&ordered_vmas);
 648        while (!list_empty(vmas)) {
 649                struct drm_i915_gem_exec_object2 *entry;
 650                bool need_fence, need_mappable;
 651
 652                vma = list_first_entry(vmas, struct i915_vma, exec_list);
 653                obj = vma->obj;
 654                entry = vma->exec_entry;
 655
 656                need_fence =
 657                        has_fenced_gpu_access &&
 658                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 659                        obj->tiling_mode != I915_TILING_NONE;
 660                need_mappable = need_fence || need_reloc_mappable(vma);
 661
 662                if (need_mappable)
 663                        list_move(&vma->exec_list, &ordered_vmas);
 664                else
 665                        list_move_tail(&vma->exec_list, &ordered_vmas);
 666
 667                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 668                obj->base.pending_write_domain = 0;
 669                obj->pending_fenced_gpu_access = false;
 670        }
 671        list_splice(&ordered_vmas, vmas);
 672
 673        /* Attempt to pin all of the buffers into the GTT.
 674         * This is done in 3 phases:
 675         *
 676         * 1a. Unbind all objects that do not match the GTT constraints for
 677         *     the execbuffer (fenceable, mappable, alignment etc).
 678         * 1b. Increment pin count for already bound objects.
 679         * 2.  Bind new objects.
 680         * 3.  Decrement pin count.
 681         *
  682         * This avoids unnecessary unbinding of later objects in order to make
 683         * room for the earlier objects *unless* we need to defragment.
 684         */
 685        retry = 0;
 686        do {
 687                int ret = 0;
 688
 689                /* Unbind any ill-fitting objects or pin. */
 690                list_for_each_entry(vma, vmas, exec_list) {
 691                        if (!drm_mm_node_allocated(&vma->node))
 692                                continue;
 693
 694                        if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 695                                ret = i915_vma_unbind(vma);
 696                        else
 697                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 698                        if (ret)
 699                                goto err;
 700                }
 701
 702                /* Bind fresh objects */
 703                list_for_each_entry(vma, vmas, exec_list) {
 704                        if (drm_mm_node_allocated(&vma->node))
 705                                continue;
 706
 707                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 708                        if (ret)
 709                                goto err;
 710                }
 711
 712err:
 713                if (ret != -ENOSPC || retry++)
 714                        return ret;
 715
 716                /* Decrement pin count for bound objects */
 717                list_for_each_entry(vma, vmas, exec_list)
 718                        i915_gem_execbuffer_unreserve_vma(vma);
 719
 720                ret = i915_gem_evict_vm(vm, true);
 721                if (ret)
 722                        return ret;
 723        } while (1);
 724}
 725
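     /*
      * Slow-path relocations, used when the atomic fast path faulted.  All
      * vmas are unreserved and struct_mutex is dropped so the relocation
      * arrays can be copied from userspace with faults allowed.  Each
      * presumed_offset in the user copy is invalidated first (it will not
      * be updated again from here), then the lock is retaken, the vma list
      * rebuilt and reserved, and the relocations applied from the kernel
      * copy.
      */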
 726static int
 727i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 728                                  struct drm_i915_gem_execbuffer2 *args,
 729                                  struct drm_file *file,
 730                                  struct intel_ring_buffer *ring,
 731                                  struct eb_vmas *eb,
 732                                  struct drm_i915_gem_exec_object2 *exec)
 733{
 734        struct drm_i915_gem_relocation_entry *reloc;
 735        struct i915_address_space *vm;
 736        struct i915_vma *vma;
 737        bool need_relocs;
 738        int *reloc_offset;
 739        int i, total, ret;
 740        unsigned count = args->buffer_count;
 741
 742        if (WARN_ON(list_empty(&eb->vmas)))
 743                return 0;
 744
 745        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 746
 747        /* We may process another execbuffer during the unlock... */
 748        while (!list_empty(&eb->vmas)) {
 749                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 750                list_del_init(&vma->exec_list);
 751                i915_gem_execbuffer_unreserve_vma(vma);
 752                drm_gem_object_unreference(&vma->obj->base);
 753        }
 754
 755        mutex_unlock(&dev->struct_mutex);
 756
 757        total = 0;
 758        for (i = 0; i < count; i++)
 759                total += exec[i].relocation_count;
 760
 761        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 762        reloc = drm_malloc_ab(total, sizeof(*reloc));
 763        if (reloc == NULL || reloc_offset == NULL) {
 764                drm_free_large(reloc);
 765                drm_free_large(reloc_offset);
 766                mutex_lock(&dev->struct_mutex);
 767                return -ENOMEM;
 768        }
 769
 770        total = 0;
 771        for (i = 0; i < count; i++) {
 772                struct drm_i915_gem_relocation_entry __user *user_relocs;
 773                u64 invalid_offset = (u64)-1;
 774                int j;
 775
 776                user_relocs = to_user_ptr(exec[i].relocs_ptr);
 777
 778                if (copy_from_user(reloc+total, user_relocs,
 779                                   exec[i].relocation_count * sizeof(*reloc))) {
 780                        ret = -EFAULT;
 781                        mutex_lock(&dev->struct_mutex);
 782                        goto err;
 783                }
 784
 785                /* As we do not update the known relocation offsets after
 786                 * relocating (due to the complexities in lock handling),
 787                 * we need to mark them as invalid now so that we force the
 788                 * relocation processing next time. Just in case the target
 789                 * object is evicted and then rebound into its old
 790                 * presumed_offset before the next execbuffer - if that
 791                 * happened we would make the mistake of assuming that the
 792                 * relocations were valid.
 793                 */
 794                for (j = 0; j < exec[i].relocation_count; j++) {
 795                        if (__copy_to_user(&user_relocs[j].presumed_offset,
 796                                           &invalid_offset,
 797                                           sizeof(invalid_offset))) {
 798                                ret = -EFAULT;
 799                                mutex_lock(&dev->struct_mutex);
 800                                goto err;
 801                        }
 802                }
 803
 804                reloc_offset[i] = total;
 805                total += exec[i].relocation_count;
 806        }
 807
 808        ret = i915_mutex_lock_interruptible(dev);
 809        if (ret) {
 810                mutex_lock(&dev->struct_mutex);
 811                goto err;
 812        }
 813
 814        /* reacquire the objects */
 815        eb_reset(eb);
 816        ret = eb_lookup_vmas(eb, exec, args, vm, file);
 817        if (ret)
 818                goto err;
 819
 820        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
 821        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 822        if (ret)
 823                goto err;
 824
 825        list_for_each_entry(vma, &eb->vmas, exec_list) {
 826                int offset = vma->exec_entry - exec;
 827                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
 828                                                            reloc + reloc_offset[offset]);
 829                if (ret)
 830                        goto err;
 831        }
 832
  833        /* Leave the user relocations as they are; this is the painfully slow path,
 834         * and we want to avoid the complication of dropping the lock whilst
 835         * having buffers reserved in the aperture and so causing spurious
 836         * ENOSPC for random operations.
 837         */
 838
 839err:
 840        drm_free_large(reloc);
 841        drm_free_large(reloc_offset);
 842        return ret;
 843}
 844
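     /*
      * Make the objects coherent and ordered for the GPU before the batch
      * runs: sync each object against this ring, clflush anything still
      * dirty in the CPU domain, flush the chipset and GTT writes if needed,
      * and unconditionally invalidate the ring's caches.
      */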
 845static int
 846i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 847                                struct list_head *vmas)
 848{
 849        struct i915_vma *vma;
 850        uint32_t flush_domains = 0;
 851        bool flush_chipset = false;
 852        int ret;
 853
 854        list_for_each_entry(vma, vmas, exec_list) {
 855                struct drm_i915_gem_object *obj = vma->obj;
 856                ret = i915_gem_object_sync(obj, ring);
 857                if (ret)
 858                        return ret;
 859
 860                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 861                        flush_chipset |= i915_gem_clflush_object(obj, false);
 862
 863                flush_domains |= obj->base.write_domain;
 864        }
 865
 866        if (flush_chipset)
 867                i915_gem_chipset_flush(ring->dev);
 868
 869        if (flush_domains & I915_GEM_DOMAIN_GTT)
 870                wmb();
 871
 872        /* Unconditionally invalidate gpu caches and ensure that we do flush
 873         * any residual writes from the previous batch.
 874         */
 875        return intel_ring_invalidate_all_caches(ring);
 876}
 877
 878static bool
 879i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 880{
 881        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
 882                return false;
 883
 884        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
 885}
 886
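     /*
      * Sanity-check the user exec list before taking any locks: reject
      * unknown per-object flags, make sure the summed relocation count
      * cannot overflow the single array allocated by the slow path, and
      * verify that every relocation array is writable (presumed_offset may
      * be written back) and, unless prefaulting is disabled, faulted in.
      */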
 887static int
 888validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 889                   int count)
 890{
 891        int i;
 892        unsigned relocs_total = 0;
 893        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 894
 895        for (i = 0; i < count; i++) {
 896                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
 897                int length; /* limited by fault_in_pages_readable() */
 898
 899                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
 900                        return -EINVAL;
 901
 902                /* First check for malicious input causing overflow in
 903                 * the worst case where we need to allocate the entire
 904                 * relocation tree as a single array.
 905                 */
 906                if (exec[i].relocation_count > relocs_max - relocs_total)
 907                        return -EINVAL;
 908                relocs_total += exec[i].relocation_count;
 909
 910                length = exec[i].relocation_count *
 911                        sizeof(struct drm_i915_gem_relocation_entry);
 912                /*
 913                 * We must check that the entire relocation array is safe
 914                 * to read, but since we may need to update the presumed
 915                 * offsets during execution, check for full write access.
 916                 */
 917                if (!access_ok(VERIFY_WRITE, ptr, length))
 918                        return -EFAULT;
 919
 920                if (likely(!i915.prefault_disable)) {
 921                        if (fault_in_multipages_readable(ptr, length))
 922                                return -EFAULT;
 923                }
 924        }
 925
 926        return 0;
 927}
 928
 929static struct i915_hw_context *
 930i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 931                          struct intel_ring_buffer *ring, const u32 ctx_id)
 932{
 933        struct i915_hw_context *ctx = NULL;
 934        struct i915_ctx_hang_stats *hs;
 935
 936        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
 937                return ERR_PTR(-EINVAL);
 938
 939        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
 940        if (IS_ERR(ctx))
 941                return ctx;
 942
 943        hs = &ctx->hang_stats;
 944        if (hs->banned) {
 945                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
 946                return ERR_PTR(-EIO);
 947        }
 948
 949        return ctx;
 950}
 951
 952static void
 953i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 954                                   struct intel_ring_buffer *ring)
 955{
 956        struct i915_vma *vma;
 957
 958        list_for_each_entry(vma, vmas, exec_list) {
 959                struct drm_i915_gem_object *obj = vma->obj;
 960                u32 old_read = obj->base.read_domains;
 961                u32 old_write = obj->base.write_domain;
 962
 963                obj->base.write_domain = obj->base.pending_write_domain;
 964                if (obj->base.write_domain == 0)
 965                        obj->base.pending_read_domains |= obj->base.read_domains;
 966                obj->base.read_domains = obj->base.pending_read_domains;
 967                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 968
 969                i915_vma_move_to_active(vma, ring);
 970                if (obj->base.write_domain) {
 971                        obj->dirty = 1;
 972                        obj->last_write_seqno = intel_ring_get_seqno(ring);
 973                        /* check for potential scanout */
 974                        if (i915_gem_obj_ggtt_bound(obj) &&
 975                            i915_gem_obj_to_ggtt(obj)->pin_count)
 976                                intel_mark_fb_busy(obj, ring);
 977                }
 978
 979                trace_i915_gem_object_change_domain(obj, old_read, old_write);
 980        }
 981}
 982
 983static void
 984i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 985                                    struct drm_file *file,
 986                                    struct intel_ring_buffer *ring,
 987                                    struct drm_i915_gem_object *obj)
 988{
 989        /* Unconditionally force add_request to emit a full flush. */
 990        ring->gpu_caches_dirty = true;
 991
 992        /* Add a breadcrumb for the completion of the batch buffer */
 993        (void)__i915_add_request(ring, file, obj, NULL);
 994}
 995
 996static int
 997i915_reset_gen7_sol_offsets(struct drm_device *dev,
 998                            struct intel_ring_buffer *ring)
 999{
1000        struct drm_i915_private *dev_priv = dev->dev_private;
1001        int ret, i;
1002
1003        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
1004                return 0;
1005
1006        ret = intel_ring_begin(ring, 4 * 3);
1007        if (ret)
1008                return ret;
1009
1010        for (i = 0; i < 4; i++) {
1011                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1012                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1013                intel_ring_emit(ring, 0);
1014        }
1015
1016        intel_ring_advance(ring);
1017
1018        return 0;
1019}
1020
1021static struct drm_i915_gem_object *
1022eb_get_batch(struct eb_vmas *eb)
1023{
1024        struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1025
1026        /*
1027         * SNA is doing fancy tricks with compressing batch buffers, which leads
1028         * to negative relocation deltas. Usually that works out ok since the
 1029         * relocated address is still positive, except when the batch is placed
1030         * very low in the GTT. Ensure this doesn't happen.
1031         *
1032         * Note that actual hangs have only been observed on gen7, but for
1033         * paranoia do it everywhere.
1034         */
1035        vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1036
1037        return vma->obj;
1038}
1039
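     /*
      * Common implementation of both execbuffer ioctls.  In order: validate
      * the flags, ring and constants mode, copy in any cliprects, take
      * struct_mutex, look up the context and address space, resolve the exec
      * list into vmas, reserve (pin) them, apply relocations (falling back
      * to the slow path on a fault), sanity-check and optionally parse the
      * batch, flush caches, switch context, emit the dispatch and finally
      * mark every buffer active and queue a request behind the batch.
      */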
1040static int
1041i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1042                       struct drm_file *file,
1043                       struct drm_i915_gem_execbuffer2 *args,
1044                       struct drm_i915_gem_exec_object2 *exec)
1045{
1046        struct drm_i915_private *dev_priv = dev->dev_private;
1047        struct eb_vmas *eb;
1048        struct drm_i915_gem_object *batch_obj;
1049        struct drm_clip_rect *cliprects = NULL;
1050        struct intel_ring_buffer *ring;
1051        struct i915_hw_context *ctx;
1052        struct i915_address_space *vm;
1053        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1054        u32 exec_start = args->batch_start_offset, exec_len;
1055        u32 mask, flags;
1056        int ret, mode, i;
1057        bool need_relocs;
1058
1059        if (!i915_gem_check_execbuffer(args))
1060                return -EINVAL;
1061
1062        ret = validate_exec_list(exec, args->buffer_count);
1063        if (ret)
1064                return ret;
1065
1066        flags = 0;
1067        if (args->flags & I915_EXEC_SECURE) {
1068                if (!file->is_master || !capable(CAP_SYS_ADMIN))
 1069                        return -EPERM;
1070
1071                flags |= I915_DISPATCH_SECURE;
1072        }
1073        if (args->flags & I915_EXEC_IS_PINNED)
1074                flags |= I915_DISPATCH_PINNED;
1075
1076        if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
1077                DRM_DEBUG("execbuf with unknown ring: %d\n",
1078                          (int)(args->flags & I915_EXEC_RING_MASK));
1079                return -EINVAL;
1080        }
1081
1082        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1083                ring = &dev_priv->ring[RCS];
1084        else
1085                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1086
1087        if (!intel_ring_initialized(ring)) {
1088                DRM_DEBUG("execbuf with invalid ring: %d\n",
1089                          (int)(args->flags & I915_EXEC_RING_MASK));
1090                return -EINVAL;
1091        }
1092
1093        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1094        mask = I915_EXEC_CONSTANTS_MASK;
1095        switch (mode) {
1096        case I915_EXEC_CONSTANTS_REL_GENERAL:
1097        case I915_EXEC_CONSTANTS_ABSOLUTE:
1098        case I915_EXEC_CONSTANTS_REL_SURFACE:
1099                if (ring == &dev_priv->ring[RCS] &&
1100                    mode != dev_priv->relative_constants_mode) {
1101                        if (INTEL_INFO(dev)->gen < 4)
1102                                return -EINVAL;
1103
1104                        if (INTEL_INFO(dev)->gen > 5 &&
1105                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1106                                return -EINVAL;
1107
 1108                        /* The HW changed the meaning of this bit on gen6 */
1109                        if (INTEL_INFO(dev)->gen >= 6)
1110                                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1111                }
1112                break;
1113        default:
1114                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1115                return -EINVAL;
1116        }
1117
1118        if (args->buffer_count < 1) {
1119                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1120                return -EINVAL;
1121        }
1122
1123        if (args->num_cliprects != 0) {
1124                if (ring != &dev_priv->ring[RCS]) {
1125                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1126                        return -EINVAL;
1127                }
1128
1129                if (INTEL_INFO(dev)->gen >= 5) {
1130                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1131                        return -EINVAL;
1132                }
1133
1134                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1135                        DRM_DEBUG("execbuf with %u cliprects\n",
1136                                  args->num_cliprects);
1137                        return -EINVAL;
1138                }
1139
1140                cliprects = kcalloc(args->num_cliprects,
1141                                    sizeof(*cliprects),
1142                                    GFP_KERNEL);
1143                if (cliprects == NULL) {
1144                        ret = -ENOMEM;
1145                        goto pre_mutex_err;
1146                }
1147
1148                if (copy_from_user(cliprects,
1149                                   to_user_ptr(args->cliprects_ptr),
1150                                   sizeof(*cliprects)*args->num_cliprects)) {
1151                        ret = -EFAULT;
1152                        goto pre_mutex_err;
1153                }
1154        }
1155
1156        intel_runtime_pm_get(dev_priv);
1157
1158        ret = i915_mutex_lock_interruptible(dev);
1159        if (ret)
1160                goto pre_mutex_err;
1161
1162        if (dev_priv->ums.mm_suspended) {
1163                mutex_unlock(&dev->struct_mutex);
1164                ret = -EBUSY;
1165                goto pre_mutex_err;
1166        }
1167
1168        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1169        if (IS_ERR(ctx)) {
1170                mutex_unlock(&dev->struct_mutex);
1171                ret = PTR_ERR(ctx);
1172                goto pre_mutex_err;
 1173        }
1174
1175        i915_gem_context_reference(ctx);
1176
1177        vm = ctx->vm;
1178        if (!USES_FULL_PPGTT(dev))
1179                vm = &dev_priv->gtt.base;
1180
1181        eb = eb_create(args);
1182        if (eb == NULL) {
1183                mutex_unlock(&dev->struct_mutex);
1184                ret = -ENOMEM;
1185                goto pre_mutex_err;
1186        }
1187
1188        /* Look up object handles */
1189        ret = eb_lookup_vmas(eb, exec, args, vm, file);
1190        if (ret)
1191                goto err;
1192
1193        /* take note of the batch buffer before we might reorder the lists */
1194        batch_obj = eb_get_batch(eb);
1195
1196        /* Move the objects en-masse into the GTT, evicting if necessary. */
1197        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1198        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1199        if (ret)
1200                goto err;
1201
1202        /* The objects are in their final locations, apply the relocations. */
1203        if (need_relocs)
1204                ret = i915_gem_execbuffer_relocate(eb);
1205        if (ret) {
1206                if (ret == -EFAULT) {
1207                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1208                                                                eb, exec);
1209                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1210                }
1211                if (ret)
1212                        goto err;
1213        }
1214
1215        /* Set the pending read domains for the batch buffer to COMMAND */
1216        if (batch_obj->base.pending_write_domain) {
1217                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1218                ret = -EINVAL;
1219                goto err;
1220        }
1221        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1222
1223        if (i915_needs_cmd_parser(ring)) {
1224                ret = i915_parse_cmds(ring,
1225                                      batch_obj,
1226                                      args->batch_start_offset,
1227                                      file->is_master);
1228                if (ret)
1229                        goto err;
1230
1231                /*
1232                 * XXX: Actually do this when enabling batch copy...
1233                 *
1234                 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
1235                 * from MI_BATCH_BUFFER_START commands issued in the
1236                 * dispatch_execbuffer implementations. We specifically don't
1237                 * want that set when the command parser is enabled.
1238                 */
1239        }
1240
1241        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1242         * batch" bit. Hence we need to pin secure batches into the global gtt.
1243         * hsw should have this fixed, but bdw mucks it up again. */
1244        if (flags & I915_DISPATCH_SECURE &&
1245            !batch_obj->has_global_gtt_mapping) {
1246                /* When we have multiple VMs, we'll need to make sure that we
1247                 * allocate space first */
1248                struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
1249                BUG_ON(!vma);
1250                vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
1251        }
1252
1253        if (flags & I915_DISPATCH_SECURE)
1254                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1255        else
1256                exec_start += i915_gem_obj_offset(batch_obj, vm);
1257
1258        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1259        if (ret)
1260                goto err;
1261
1262        ret = i915_switch_context(ring, ctx);
1263        if (ret)
1264                goto err;
1265
1266        if (ring == &dev_priv->ring[RCS] &&
1267            mode != dev_priv->relative_constants_mode) {
1268                ret = intel_ring_begin(ring, 4);
1269                if (ret)
 1270                        goto err;
1271
1272                intel_ring_emit(ring, MI_NOOP);
1273                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1274                intel_ring_emit(ring, INSTPM);
1275                intel_ring_emit(ring, mask << 16 | mode);
1276                intel_ring_advance(ring);
1277
1278                dev_priv->relative_constants_mode = mode;
1279        }
1280
1281        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1282                ret = i915_reset_gen7_sol_offsets(dev, ring);
1283                if (ret)
1284                        goto err;
1285        }
1286
1287
1288        exec_len = args->batch_len;
1289        if (cliprects) {
1290                for (i = 0; i < args->num_cliprects; i++) {
1291                        ret = i915_emit_box(dev, &cliprects[i],
1292                                            args->DR1, args->DR4);
1293                        if (ret)
1294                                goto err;
1295
1296                        ret = ring->dispatch_execbuffer(ring,
1297                                                        exec_start, exec_len,
1298                                                        flags);
1299                        if (ret)
1300                                goto err;
1301                }
1302        } else {
1303                ret = ring->dispatch_execbuffer(ring,
1304                                                exec_start, exec_len,
1305                                                flags);
1306                if (ret)
1307                        goto err;
1308        }
1309
1310        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1311
1312        i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1313        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1314
1315err:
1316        /* the request owns the ref now */
1317        i915_gem_context_unreference(ctx);
1318        eb_destroy(eb);
1319
1320        mutex_unlock(&dev->struct_mutex);
1321
1322pre_mutex_err:
1323        kfree(cliprects);
1324
1325        /* intel_gpu_busy should also get a ref, so it will free when the device
1326         * is really idle. */
1327        intel_runtime_pm_put(dev_priv);
1328        return ret;
1329}
1330
1331/*
1332 * Legacy execbuffer just creates an exec2 list from the original exec object
1333 * list array and passes it to the real function.
1334 */
1335int
1336i915_gem_execbuffer(struct drm_device *dev, void *data,
1337                    struct drm_file *file)
1338{
1339        struct drm_i915_gem_execbuffer *args = data;
1340        struct drm_i915_gem_execbuffer2 exec2;
1341        struct drm_i915_gem_exec_object *exec_list = NULL;
1342        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1343        int ret, i;
1344
1345        if (args->buffer_count < 1) {
1346                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1347                return -EINVAL;
1348        }
1349
1350        /* Copy in the exec list from userland */
1351        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1352        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1353        if (exec_list == NULL || exec2_list == NULL) {
1354                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1355                          args->buffer_count);
1356                drm_free_large(exec_list);
1357                drm_free_large(exec2_list);
1358                return -ENOMEM;
1359        }
1360        ret = copy_from_user(exec_list,
1361                             to_user_ptr(args->buffers_ptr),
1362                             sizeof(*exec_list) * args->buffer_count);
1363        if (ret != 0) {
1364                DRM_DEBUG("copy %d exec entries failed %d\n",
1365                          args->buffer_count, ret);
1366                drm_free_large(exec_list);
1367                drm_free_large(exec2_list);
1368                return -EFAULT;
1369        }
1370
1371        for (i = 0; i < args->buffer_count; i++) {
1372                exec2_list[i].handle = exec_list[i].handle;
1373                exec2_list[i].relocation_count = exec_list[i].relocation_count;
1374                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1375                exec2_list[i].alignment = exec_list[i].alignment;
1376                exec2_list[i].offset = exec_list[i].offset;
1377                if (INTEL_INFO(dev)->gen < 4)
1378                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1379                else
1380                        exec2_list[i].flags = 0;
1381        }
1382
1383        exec2.buffers_ptr = args->buffers_ptr;
1384        exec2.buffer_count = args->buffer_count;
1385        exec2.batch_start_offset = args->batch_start_offset;
1386        exec2.batch_len = args->batch_len;
1387        exec2.DR1 = args->DR1;
1388        exec2.DR4 = args->DR4;
1389        exec2.num_cliprects = args->num_cliprects;
1390        exec2.cliprects_ptr = args->cliprects_ptr;
1391        exec2.flags = I915_EXEC_RENDER;
1392        i915_execbuffer2_set_context_id(exec2, 0);
1393
1394        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1395        if (!ret) {
1396                struct drm_i915_gem_exec_object __user *user_exec_list =
1397                        to_user_ptr(args->buffers_ptr);
1398
1399                /* Copy the new buffer offsets back to the user's exec list. */
1400                for (i = 0; i < args->buffer_count; i++) {
1401                        ret = __copy_to_user(&user_exec_list[i].offset,
1402                                             &exec2_list[i].offset,
1403                                             sizeof(user_exec_list[i].offset));
1404                        if (ret) {
1405                                ret = -EFAULT;
1406                                DRM_DEBUG("failed to copy %d exec entries "
1407                                          "back to user (%d)\n",
1408                                          args->buffer_count, ret);
1409                                break;
1410                        }
1411                }
1412        }
1413
1414        drm_free_large(exec_list);
1415        drm_free_large(exec2_list);
1416        return ret;
1417}
1418
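     /*
      * The execbuffer2 ioctl proper.  For orientation, a minimal userspace
      * submission through libdrm looks roughly like the sketch below.  It is
      * illustrative only: 'fd', 'target_handle', 'batch_handle', 'relocs',
      * 'nrelocs' and 'batch_bytes' are placeholders, not anything defined in
      * this file.
      *
      *      struct drm_i915_gem_exec_object2 objs[2] = {};
      *      struct drm_i915_gem_execbuffer2 execbuf = {};
      *
      *      objs[0].handle = target_handle;         // patched via relocations
      *      objs[1].handle = batch_handle;          // the batch must be last
      *      objs[1].relocs_ptr = (uintptr_t)relocs;
      *      objs[1].relocation_count = nrelocs;
      *
      *      execbuf.buffers_ptr = (uintptr_t)objs;
      *      execbuf.buffer_count = 2;
      *      execbuf.batch_len = batch_bytes;        // 8-byte aligned, see
      *                                              // i915_gem_check_execbuffer()
      *      execbuf.flags = I915_EXEC_RENDER;
      *
      *      drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
      */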
1419int
1420i915_gem_execbuffer2(struct drm_device *dev, void *data,
1421                     struct drm_file *file)
1422{
1423        struct drm_i915_gem_execbuffer2 *args = data;
1424        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1425        int ret;
1426
1427        if (args->buffer_count < 1 ||
1428            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1429                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1430                return -EINVAL;
1431        }
1432
1433        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1434                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1435        if (exec2_list == NULL)
1436                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1437                                           args->buffer_count);
1438        if (exec2_list == NULL) {
1439                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1440                          args->buffer_count);
1441                return -ENOMEM;
1442        }
1443        ret = copy_from_user(exec2_list,
1444                             to_user_ptr(args->buffers_ptr),
1445                             sizeof(*exec2_list) * args->buffer_count);
1446        if (ret != 0) {
1447                DRM_DEBUG("copy %d exec entries failed %d\n",
1448                          args->buffer_count, ret);
1449                drm_free_large(exec2_list);
1450                return -EFAULT;
1451        }
1452
1453        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1454        if (!ret) {
1455                /* Copy the new buffer offsets back to the user's exec list. */
 1456                struct drm_i915_gem_exec_object2 __user *user_exec_list =
1457                                   to_user_ptr(args->buffers_ptr);
1458                int i;
1459
1460                for (i = 0; i < args->buffer_count; i++) {
1461                        ret = __copy_to_user(&user_exec_list[i].offset,
1462                                             &exec2_list[i].offset,
1463                                             sizeof(user_exec_list[i].offset));
1464                        if (ret) {
1465                                ret = -EFAULT;
1466                                DRM_DEBUG("failed to copy %d exec entries "
1467                                          "back to user\n",
1468                                          args->buffer_count);
1469                                break;
1470                        }
1471                }
1472        }
1473
1474        drm_free_large(exec2_list);
1475        return ret;
1476}
1477