linux/drivers/gpu/drm/i915/intel_ringbuffer.c
   1/*
   2 * Copyright © 2008-2010 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Zou Nan hai <nanhai.zou@intel.com>
   26 *    Xiang Hai hao <haihao.xiang@intel.com>
  27 *
  28 */
  29
  30#include <linux/log2.h>
  31#include <drm/drmP.h>
  32#include "i915_drv.h"
  33#include <drm/i915_drm.h>
  34#include "i915_trace.h"
  35#include "intel_drv.h"
  36
  37/* Rough estimate of the typical request size, performing a flush,
  38 * set-context and then emitting the batch.
  39 */
  40#define LEGACY_REQUEST_SIZE 200
  41
  42int __intel_ring_space(int head, int tail, int size)
  43{
  44        int space = head - tail;
  45        if (space <= 0)
  46                space += size;
  47        return space - I915_RING_FREE_SPACE;
  48}
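     /*
      * Worked example (illustrative, assuming a 16 KiB ring and an
      * I915_RING_FREE_SPACE reservation of 64 bytes): with head = 0x100 and
      * tail = 0x3e00, head - tail is negative, so the ring size is added
      * back: 0x100 - 0x3e00 + 0x4000 = 0x300 bytes between tail and head,
      * of which 0x300 - 64 are reported as usable.
      */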
  49
  50void intel_ring_update_space(struct intel_ring *ring)
  51{
  52        if (ring->last_retired_head != -1) {
  53                ring->head = ring->last_retired_head;
  54                ring->last_retired_head = -1;
  55        }
  56
  57        ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
  58                                         ring->tail, ring->size);
  59}
  60
  61static int
  62gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  63{
  64        struct intel_ring *ring = req->ring;
  65        u32 cmd;
  66        int ret;
  67
  68        cmd = MI_FLUSH;
  69
  70        if (mode & EMIT_INVALIDATE)
  71                cmd |= MI_READ_FLUSH;
  72
  73        ret = intel_ring_begin(req, 2);
  74        if (ret)
  75                return ret;
  76
  77        intel_ring_emit(ring, cmd);
  78        intel_ring_emit(ring, MI_NOOP);
  79        intel_ring_advance(ring);
  80
  81        return 0;
  82}
  83
  84static int
  85gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
  86{
  87        struct intel_ring *ring = req->ring;
  88        u32 cmd;
  89        int ret;
  90
  91        /*
  92         * read/write caches:
  93         *
  94         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  95         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
  96         * also flushed at 2d versus 3d pipeline switches.
  97         *
  98         * read-only caches:
  99         *
 100         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
 101         * MI_READ_FLUSH is set, and is always flushed on 965.
 102         *
 103         * I915_GEM_DOMAIN_COMMAND may not exist?
 104         *
 105         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
 106         * invalidated when MI_EXE_FLUSH is set.
 107         *
 108         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
 109         * invalidated with every MI_FLUSH.
 110         *
 111         * TLBs:
 112         *
 113         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
  114         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
 115         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
 116         * are flushed at any MI_FLUSH.
 117         */
 118
 119        cmd = MI_FLUSH;
 120        if (mode & EMIT_INVALIDATE) {
 121                cmd |= MI_EXE_FLUSH;
 122                if (IS_G4X(req->i915) || IS_GEN5(req->i915))
 123                        cmd |= MI_INVALIDATE_ISP;
 124        }
 125
 126        ret = intel_ring_begin(req, 2);
 127        if (ret)
 128                return ret;
 129
 130        intel_ring_emit(ring, cmd);
 131        intel_ring_emit(ring, MI_NOOP);
 132        intel_ring_advance(ring);
 133
 134        return 0;
 135}
 136
 137/**
 138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 139 * implementing two workarounds on gen6.  From section 1.4.7.1
 140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 141 *
 142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 143 * produced by non-pipelined state commands), software needs to first
 144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 145 * 0.
 146 *
 147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 149 *
 150 * And the workaround for these two requires this workaround first:
 151 *
 152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 153 * BEFORE the pipe-control with a post-sync op and no write-cache
 154 * flushes.
 155 *
 156 * And this last workaround is tricky because of the requirements on
 157 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 158 * volume 2 part 1:
 159 *
 160 *     "1 of the following must also be set:
 161 *      - Render Target Cache Flush Enable ([12] of DW1)
 162 *      - Depth Cache Flush Enable ([0] of DW1)
 163 *      - Stall at Pixel Scoreboard ([1] of DW1)
 164 *      - Depth Stall ([13] of DW1)
 165 *      - Post-Sync Operation ([13] of DW1)
 166 *      - Notify Enable ([8] of DW1)"
 167 *
 168 * The cache flushes require the workaround flush that triggered this
 169 * one, so we can't use it.  Depth stall would trigger the same.
 170 * Post-sync nonzero is what triggered this second workaround, so we
 171 * can't use that one either.  Notify enable is IRQs, which aren't
 172 * really our business.  That leaves only stall at scoreboard.
 173 */
 174static int
 175intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 176{
 177        struct intel_ring *ring = req->ring;
 178        u32 scratch_addr =
 179                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 180        int ret;
 181
 182        ret = intel_ring_begin(req, 6);
 183        if (ret)
 184                return ret;
 185
 186        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
 187        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
 188                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
 189        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
 190        intel_ring_emit(ring, 0); /* low dword */
 191        intel_ring_emit(ring, 0); /* high dword */
 192        intel_ring_emit(ring, MI_NOOP);
 193        intel_ring_advance(ring);
 194
 195        ret = intel_ring_begin(req, 6);
 196        if (ret)
 197                return ret;
 198
 199        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
 200        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
 201        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
 202        intel_ring_emit(ring, 0);
 203        intel_ring_emit(ring, 0);
 204        intel_ring_emit(ring, MI_NOOP);
 205        intel_ring_advance(ring);
 206
 207        return 0;
 208}
 209
 210static int
 211gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 212{
 213        struct intel_ring *ring = req->ring;
 214        u32 scratch_addr =
 215                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 216        u32 flags = 0;
 217        int ret;
 218
 219        /* Force SNB workarounds for PIPE_CONTROL flushes */
 220        ret = intel_emit_post_sync_nonzero_flush(req);
 221        if (ret)
 222                return ret;
 223
 224        /* Just flush everything.  Experiments have shown that reducing the
 225         * number of bits based on the write domains has little performance
 226         * impact.
 227         */
 228        if (mode & EMIT_FLUSH) {
 229                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 230                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 231                /*
 232                 * Ensure that any following seqno writes only happen
 233                 * when the render cache is indeed flushed.
 234                 */
 235                flags |= PIPE_CONTROL_CS_STALL;
 236        }
 237        if (mode & EMIT_INVALIDATE) {
 238                flags |= PIPE_CONTROL_TLB_INVALIDATE;
 239                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 240                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 241                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
 242                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 243                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 244                /*
 245                 * TLB invalidate requires a post-sync write.
 246                 */
 247                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 248        }
 249
 250        ret = intel_ring_begin(req, 4);
 251        if (ret)
 252                return ret;
 253
 254        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 255        intel_ring_emit(ring, flags);
 256        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
 257        intel_ring_emit(ring, 0);
 258        intel_ring_advance(ring);
 259
 260        return 0;
 261}
 262
 263static int
 264gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 265{
 266        struct intel_ring *ring = req->ring;
 267        int ret;
 268
 269        ret = intel_ring_begin(req, 4);
 270        if (ret)
 271                return ret;
 272
 273        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 274        intel_ring_emit(ring,
 275                        PIPE_CONTROL_CS_STALL |
 276                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
 277        intel_ring_emit(ring, 0);
 278        intel_ring_emit(ring, 0);
 279        intel_ring_advance(ring);
 280
 281        return 0;
 282}
 283
 284static int
 285gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 286{
 287        struct intel_ring *ring = req->ring;
 288        u32 scratch_addr =
 289                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 290        u32 flags = 0;
 291        int ret;
 292
 293        /*
 294         * Ensure that any following seqno writes only happen when the render
 295         * cache is indeed flushed.
 296         *
 297         * Workaround: 4th PIPE_CONTROL command (except the ones with only
 298         * read-cache invalidate bits set) must have the CS_STALL bit set. We
 299         * don't try to be clever and just set it unconditionally.
 300         */
 301        flags |= PIPE_CONTROL_CS_STALL;
 302
 303        /* Just flush everything.  Experiments have shown that reducing the
 304         * number of bits based on the write domains has little performance
 305         * impact.
 306         */
 307        if (mode & EMIT_FLUSH) {
 308                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 309                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 310                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 311                flags |= PIPE_CONTROL_FLUSH_ENABLE;
 312        }
 313        if (mode & EMIT_INVALIDATE) {
 314                flags |= PIPE_CONTROL_TLB_INVALIDATE;
 315                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 316                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 317                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
 318                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 319                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 320                flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
 321                /*
 322                 * TLB invalidate requires a post-sync write.
 323                 */
 324                flags |= PIPE_CONTROL_QW_WRITE;
 325                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 326
 327                flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
 328
 329                /* Workaround: we must issue a pipe_control with CS-stall bit
 330                 * set before a pipe_control command that has the state cache
 331                 * invalidate bit set. */
 332                gen7_render_ring_cs_stall_wa(req);
 333        }
 334
 335        ret = intel_ring_begin(req, 4);
 336        if (ret)
 337                return ret;
 338
 339        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 340        intel_ring_emit(ring, flags);
 341        intel_ring_emit(ring, scratch_addr);
 342        intel_ring_emit(ring, 0);
 343        intel_ring_advance(ring);
 344
 345        return 0;
 346}
 347
 348static int
 349gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 350                       u32 flags, u32 scratch_addr)
 351{
 352        struct intel_ring *ring = req->ring;
 353        int ret;
 354
 355        ret = intel_ring_begin(req, 6);
 356        if (ret)
 357                return ret;
 358
 359        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
 360        intel_ring_emit(ring, flags);
 361        intel_ring_emit(ring, scratch_addr);
 362        intel_ring_emit(ring, 0);
 363        intel_ring_emit(ring, 0);
 364        intel_ring_emit(ring, 0);
 365        intel_ring_advance(ring);
 366
 367        return 0;
 368}
 369
 370static int
 371gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 372{
 373        u32 scratch_addr =
 374                i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
 375        u32 flags = 0;
 376        int ret;
 377
 378        flags |= PIPE_CONTROL_CS_STALL;
 379
 380        if (mode & EMIT_FLUSH) {
 381                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 382                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 383                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 384                flags |= PIPE_CONTROL_FLUSH_ENABLE;
 385        }
 386        if (mode & EMIT_INVALIDATE) {
 387                flags |= PIPE_CONTROL_TLB_INVALIDATE;
 388                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 389                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
 390                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
 391                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
 392                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 393                flags |= PIPE_CONTROL_QW_WRITE;
 394                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 395
 396                /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
 397                ret = gen8_emit_pipe_control(req,
 398                                             PIPE_CONTROL_CS_STALL |
 399                                             PIPE_CONTROL_STALL_AT_SCOREBOARD,
 400                                             0);
 401                if (ret)
 402                        return ret;
 403        }
 404
 405        return gen8_emit_pipe_control(req, flags, scratch_addr);
 406}
 407
 408u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 409{
 410        struct drm_i915_private *dev_priv = engine->i915;
 411        u64 acthd;
 412
 413        if (INTEL_GEN(dev_priv) >= 8)
 414                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
 415                                         RING_ACTHD_UDW(engine->mmio_base));
 416        else if (INTEL_GEN(dev_priv) >= 4)
 417                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 418        else
 419                acthd = I915_READ(ACTHD);
 420
 421        return acthd;
 422}
 423
 424static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 425{
 426        struct drm_i915_private *dev_priv = engine->i915;
 427        u32 addr;
 428
 429        addr = dev_priv->status_page_dmah->busaddr;
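             /*
              * Illustrative note (assuming a 36-bit bus address on gen4+):
              * the low 32 bits of the page-aligned bus address are used
              * as-is, while bits 35:32 are folded into bits 7:4 below.
              * E.g. a status page at bus address 0x234567000 would be
              * written to HWS_PGA as 0x34567020.
              */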
 430        if (INTEL_GEN(dev_priv) >= 4)
 431                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 432        I915_WRITE(HWS_PGA, addr);
 433}
 434
 435static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 436{
 437        struct drm_i915_private *dev_priv = engine->i915;
 438        i915_reg_t mmio;
 439
 440        /* The ring status page addresses are no longer next to the rest of
 441         * the ring registers as of gen7.
 442         */
 443        if (IS_GEN7(dev_priv)) {
 444                switch (engine->id) {
 445                case RCS:
 446                        mmio = RENDER_HWS_PGA_GEN7;
 447                        break;
 448                case BCS:
 449                        mmio = BLT_HWS_PGA_GEN7;
 450                        break;
  451                /*
  452                 * VCS2 doesn't actually exist on Gen7; this case is only
  453                 * here to silence the gcc switch check warning.
  454                 */
 455                case VCS2:
 456                case VCS:
 457                        mmio = BSD_HWS_PGA_GEN7;
 458                        break;
 459                case VECS:
 460                        mmio = VEBOX_HWS_PGA_GEN7;
 461                        break;
 462                }
 463        } else if (IS_GEN6(dev_priv)) {
 464                mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 465        } else {
 466                /* XXX: gen8 returns to sanity */
 467                mmio = RING_HWS_PGA(engine->mmio_base);
 468        }
 469
 470        I915_WRITE(mmio, engine->status_page.ggtt_offset);
 471        POSTING_READ(mmio);
 472
 473        /*
 474         * Flush the TLB for this page
 475         *
 476         * FIXME: These two bits have disappeared on gen8, so a question
 477         * arises: do we still need this and if so how should we go about
 478         * invalidating the TLB?
 479         */
 480        if (IS_GEN(dev_priv, 6, 7)) {
 481                i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 482
  483                /* ring should be idle before issuing a sync flush */
 484                WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 485
 486                I915_WRITE(reg,
 487                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
 488                                              INSTPM_SYNC_FLUSH));
 489                if (intel_wait_for_register(dev_priv,
 490                                            reg, INSTPM_SYNC_FLUSH, 0,
 491                                            1000))
 492                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
 493                                  engine->name);
 494        }
 495}
 496
 497static bool stop_ring(struct intel_engine_cs *engine)
 498{
 499        struct drm_i915_private *dev_priv = engine->i915;
 500
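             /*
              * Note (illustrative): RING_MI_MODE is a masked register, so
              * _MASKED_BIT_ENABLE(STOP_RING) below writes STOP_RING together
              * with its write-enable bit in the upper 16 bits; bits whose
              * enable is not set are left unchanged by the hardware.
              */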
 501        if (INTEL_GEN(dev_priv) > 2) {
 502                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
 503                if (intel_wait_for_register(dev_priv,
 504                                            RING_MI_MODE(engine->mmio_base),
 505                                            MODE_IDLE,
 506                                            MODE_IDLE,
 507                                            1000)) {
 508                        DRM_ERROR("%s : timed out trying to stop ring\n",
 509                                  engine->name);
 510                        /* Sometimes we observe that the idle flag is not
 511                         * set even though the ring is empty. So double
 512                         * check before giving up.
 513                         */
 514                        if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
 515                                return false;
 516                }
 517        }
 518
 519        I915_WRITE_CTL(engine, 0);
 520        I915_WRITE_HEAD(engine, 0);
 521        I915_WRITE_TAIL(engine, 0);
 522
 523        if (INTEL_GEN(dev_priv) > 2) {
 524                (void)I915_READ_CTL(engine);
 525                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 526        }
 527
 528        return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 529}
 530
 531static int init_ring_common(struct intel_engine_cs *engine)
 532{
 533        struct drm_i915_private *dev_priv = engine->i915;
 534        struct intel_ring *ring = engine->buffer;
 535        int ret = 0;
 536
 537        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 538
 539        if (!stop_ring(engine)) {
 540                /* G45 ring initialization often fails to reset head to zero */
 541                DRM_DEBUG_KMS("%s head not reset to zero "
 542                              "ctl %08x head %08x tail %08x start %08x\n",
 543                              engine->name,
 544                              I915_READ_CTL(engine),
 545                              I915_READ_HEAD(engine),
 546                              I915_READ_TAIL(engine),
 547                              I915_READ_START(engine));
 548
 549                if (!stop_ring(engine)) {
 550                        DRM_ERROR("failed to set %s head to zero "
 551                                  "ctl %08x head %08x tail %08x start %08x\n",
 552                                  engine->name,
 553                                  I915_READ_CTL(engine),
 554                                  I915_READ_HEAD(engine),
 555                                  I915_READ_TAIL(engine),
 556                                  I915_READ_START(engine));
 557                        ret = -EIO;
 558                        goto out;
 559                }
 560        }
 561
 562        if (HWS_NEEDS_PHYSICAL(dev_priv))
 563                ring_setup_phys_status_page(engine);
 564        else
 565                intel_ring_setup_status_page(engine);
 566
 567        intel_engine_reset_breadcrumbs(engine);
 568
 569        /* Enforce ordering by reading HEAD register back */
 570        I915_READ_HEAD(engine);
 571
 572        /* Initialize the ring. This must happen _after_ we've cleared the ring
 573         * registers with the above sequence (the readback of the HEAD registers
 574         * also enforces ordering), otherwise the hw might lose the new ring
 575         * register values. */
 576        I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
 577
 578        /* WaClearRingBufHeadRegAtInit:ctg,elk */
 579        if (I915_READ_HEAD(engine))
 580                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
 581                          engine->name, I915_READ_HEAD(engine));
 582
 583        intel_ring_update_space(ring);
 584        I915_WRITE_HEAD(engine, ring->head);
 585        I915_WRITE_TAIL(engine, ring->tail);
 586        (void)I915_READ_TAIL(engine);
 587
 588        I915_WRITE_CTL(engine,
 589                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 590                        | RING_VALID);
 591
  592        /* If the ring never reports RING_VALID, it is dead */
 593        if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
 594                                       RING_VALID, RING_VALID,
 595                                       50)) {
 596                DRM_ERROR("%s initialization failed "
 597                          "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 598                          engine->name,
 599                          I915_READ_CTL(engine),
 600                          I915_READ_CTL(engine) & RING_VALID,
 601                          I915_READ_HEAD(engine), ring->head,
 602                          I915_READ_TAIL(engine), ring->tail,
 603                          I915_READ_START(engine),
 604                          i915_ggtt_offset(ring->vma));
 605                ret = -EIO;
 606                goto out;
 607        }
 608
 609        intel_engine_init_hangcheck(engine);
 610
 611out:
 612        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 613
 614        return ret;
 615}
 616
 617static void reset_ring_common(struct intel_engine_cs *engine,
 618                              struct drm_i915_gem_request *request)
 619{
 620        struct intel_ring *ring = request->ring;
 621
 622        ring->head = request->postfix;
 623        ring->last_retired_head = -1;
 624}
 625
 626static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 627{
 628        struct intel_ring *ring = req->ring;
 629        struct i915_workarounds *w = &req->i915->workarounds;
 630        int ret, i;
 631
 632        if (w->count == 0)
 633                return 0;
 634
 635        ret = req->engine->emit_flush(req, EMIT_BARRIER);
 636        if (ret)
 637                return ret;
 638
 639        ret = intel_ring_begin(req, (w->count * 2 + 2));
 640        if (ret)
 641                return ret;
 642
 643        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 644        for (i = 0; i < w->count; i++) {
 645                intel_ring_emit_reg(ring, w->reg[i].addr);
 646                intel_ring_emit(ring, w->reg[i].value);
 647        }
 648        intel_ring_emit(ring, MI_NOOP);
 649
 650        intel_ring_advance(ring);
 651
 652        ret = req->engine->emit_flush(req, EMIT_BARRIER);
 653        if (ret)
 654                return ret;
 655
 656        DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
 657
 658        return 0;
 659}
 660
 661static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 662{
 663        int ret;
 664
 665        ret = intel_ring_workarounds_emit(req);
 666        if (ret != 0)
 667                return ret;
 668
 669        ret = i915_gem_render_state_init(req);
 670        if (ret)
 671                return ret;
 672
 673        return 0;
 674}
 675
 676static int wa_add(struct drm_i915_private *dev_priv,
 677                  i915_reg_t addr,
 678                  const u32 mask, const u32 val)
 679{
 680        const u32 idx = dev_priv->workarounds.count;
 681
 682        if (WARN_ON(idx >= I915_MAX_WA_REGS))
 683                return -ENOSPC;
 684
 685        dev_priv->workarounds.reg[idx].addr = addr;
 686        dev_priv->workarounds.reg[idx].value = val;
 687        dev_priv->workarounds.reg[idx].mask = mask;
 688
 689        dev_priv->workarounds.count++;
 690
 691        return 0;
 692}
 693
 694#define WA_REG(addr, mask, val) do { \
 695                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
 696                if (r) \
 697                        return r; \
 698        } while (0)
 699
 700#define WA_SET_BIT_MASKED(addr, mask) \
 701        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
 702
 703#define WA_CLR_BIT_MASKED(addr, mask) \
 704        WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
 705
 706#define WA_SET_FIELD_MASKED(addr, mask, value) \
 707        WA_REG(addr, mask, _MASKED_FIELD(mask, value))
 708
 709#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
 710#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
 711
 712#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
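     /*
      * Usage sketch (illustrative): inside one of the *_init_workarounds()
      * helpers below, which return int and have dev_priv in scope,
      *
      *     WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
      *
      * records one dev_priv->workarounds entry whose value is
      * _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE), and makes the
      * enclosing function return -ENOSPC if the table is already full.
      */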
 713
 714static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 715                                 i915_reg_t reg)
 716{
 717        struct drm_i915_private *dev_priv = engine->i915;
 718        struct i915_workarounds *wa = &dev_priv->workarounds;
 719        const uint32_t index = wa->hw_whitelist_count[engine->id];
 720
 721        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
 722                return -EINVAL;
 723
 724        WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
 725                 i915_mmio_reg_offset(reg));
 726        wa->hw_whitelist_count[engine->id]++;
 727
 728        return 0;
 729}
 730
 731static int gen8_init_workarounds(struct intel_engine_cs *engine)
 732{
 733        struct drm_i915_private *dev_priv = engine->i915;
 734
 735        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 736
 737        /* WaDisableAsyncFlipPerfMode:bdw,chv */
 738        WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
 739
 740        /* WaDisablePartialInstShootdown:bdw,chv */
 741        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 742                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 743
 744        /* Use Force Non-Coherent whenever executing a 3D context. This is a
  745         * workaround for a possible hang in the unlikely event a TLB
 746         * invalidation occurs during a PSD flush.
 747         */
 748        /* WaForceEnableNonCoherent:bdw,chv */
 749        /* WaHdcDisableFetchWhenMasked:bdw,chv */
 750        WA_SET_BIT_MASKED(HDC_CHICKEN0,
 751                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
 752                          HDC_FORCE_NON_COHERENT);
 753
 754        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
 755         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
 756         *  polygons in the same 8x4 pixel/sample area to be processed without
 757         *  stalling waiting for the earlier ones to write to Hierarchical Z
 758         *  buffer."
 759         *
 760         * This optimization is off by default for BDW and CHV; turn it on.
 761         */
 762        WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
 763
 764        /* Wa4x4STCOptimizationDisable:bdw,chv */
 765        WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 766
 767        /*
 768         * BSpec recommends 8x4 when MSAA is used,
 769         * however in practice 16x4 seems fastest.
 770         *
 771         * Note that PS/WM thread counts depend on the WIZ hashing
 772         * disable bit, which we don't touch here, but it's good
 773         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 774         */
 775        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
 776                            GEN6_WIZ_HASHING_MASK,
 777                            GEN6_WIZ_HASHING_16x4);
 778
 779        return 0;
 780}
 781
 782static int bdw_init_workarounds(struct intel_engine_cs *engine)
 783{
 784        struct drm_i915_private *dev_priv = engine->i915;
 785        int ret;
 786
 787        ret = gen8_init_workarounds(engine);
 788        if (ret)
 789                return ret;
 790
 791        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 792        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 793
 794        /* WaDisableDopClockGating:bdw */
 795        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
 796                          DOP_CLOCK_GATING_DISABLE);
 797
 798        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 799                          GEN8_SAMPLER_POWER_BYPASS_DIS);
 800
 801        WA_SET_BIT_MASKED(HDC_CHICKEN0,
 802                          /* WaForceContextSaveRestoreNonCoherent:bdw */
 803                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 804                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
 805                          (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 806
 807        return 0;
 808}
 809
 810static int chv_init_workarounds(struct intel_engine_cs *engine)
 811{
 812        struct drm_i915_private *dev_priv = engine->i915;
 813        int ret;
 814
 815        ret = gen8_init_workarounds(engine);
 816        if (ret)
 817                return ret;
 818
 819        /* WaDisableThreadStallDopClockGating:chv */
 820        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
 821
 822        /* Improve HiZ throughput on CHV. */
 823        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
 824
 825        return 0;
 826}
 827
 828static int gen9_init_workarounds(struct intel_engine_cs *engine)
 829{
 830        struct drm_i915_private *dev_priv = engine->i915;
 831        int ret;
 832
 833        /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
 834        I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
 835
 836        /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
 837        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 838                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 839
 840        /* WaDisableKillLogic:bxt,skl,kbl */
 841        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 842                   ECOCHK_DIS_TLB);
 843
 844        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
 845        /* WaDisablePartialInstShootdown:skl,bxt,kbl */
 846        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 847                          FLOW_CONTROL_ENABLE |
 848                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 849
 850        /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
 851        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 852                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 853
 854        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
 855        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
 856            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 857                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 858                                  GEN9_DG_MIRROR_FIX_ENABLE);
 859
 860        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
 861        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
 862            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
 863                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 864                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
 865                /*
 866                 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
 867                 * but we do that in per ctx batchbuffer as there is an issue
 868                 * with this register not getting restored on ctx restore
 869                 */
 870        }
 871
 872        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
 873        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
 874        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 875                          GEN9_ENABLE_YV12_BUGFIX |
 876                          GEN9_ENABLE_GPGPU_PREEMPTION);
 877
 878        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
 879        /* WaDisablePartialResolveInVc:skl,bxt,kbl */
 880        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
 881                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 882
 883        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
 884        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 885                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 886
 887        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
 888        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
 889            IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 890                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 891                                  PIXEL_MASK_CAMMING_DISABLE);
 892
 893        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
 894        WA_SET_BIT_MASKED(HDC_CHICKEN0,
 895                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 896                          HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
 897
 898        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
 899         * both tied to WaForceContextSaveRestoreNonCoherent
 900         * in some hsds for skl. We keep the tie for all gen9. The
 901         * documentation is a bit hazy and so we want to get common behaviour,
 902         * even though there is no clear evidence we would need both on kbl/bxt.
 903         * This area has been source of system hangs so we play it safe
 904         * and mimic the skl regardless of what bspec says.
 905         *
 906         * Use Force Non-Coherent whenever executing a 3D context. This
 907         * is a workaround for a possible hang in the unlikely event
 908         * a TLB invalidation occurs during a PSD flush.
 909         */
 910
 911        /* WaForceEnableNonCoherent:skl,bxt,kbl */
 912        WA_SET_BIT_MASKED(HDC_CHICKEN0,
 913                          HDC_FORCE_NON_COHERENT);
 914
 915        /* WaDisableHDCInvalidation:skl,bxt,kbl */
 916        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 917                   BDW_DISABLE_HDC_INVALIDATION);
 918
 919        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
 920        if (IS_SKYLAKE(dev_priv) ||
 921            IS_KABYLAKE(dev_priv) ||
 922            IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
 923                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 924                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 925
 926        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
 927        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 928
 929        /* WaOCLCoherentLineFlush:skl,bxt,kbl */
 930        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
 931                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 932
 933        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
 934        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
 935        if (ret)
 936                return ret;
 937
 938        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
  939        ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
 940        if (ret)
 941                return ret;
 942
 943        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
 944        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
 945        if (ret)
 946                return ret;
 947
 948        return 0;
 949}
 950
 951static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 952{
 953        struct drm_i915_private *dev_priv = engine->i915;
 954        u8 vals[3] = { 0, 0, 0 };
 955        unsigned int i;
 956
 957        for (i = 0; i < 3; i++) {
 958                u8 ss;
 959
 960                /*
 961                 * Only consider slices where one, and only one, subslice has 7
 962                 * EUs
 963                 */
 964                if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
 965                        continue;
 966
 967                /*
 968                 * subslice_7eu[i] != 0 (because of the check above) and
 969                 * ss_max == 4 (maximum number of subslices possible per slice)
 970                 *
 971                 * ->    0 <= ss <= 3;
 972                 */
 973                ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
 974                vals[i] = 3 - ss;
 975        }
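             /*
              * Worked example (illustrative): if slice 1 reports
              * subslice_7eu[1] = 0x4 (only subslice 2 has 7 EUs), the loop
              * above gives ss = ffs(0x4) - 1 = 2 and vals[1] = 3 - 2 = 1,
              * which is then programmed as GEN9_IZ_HASHING(1, 1) via
              * GEN7_GT_MODE below.
              */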
 976
 977        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
 978                return 0;
 979
 980        /* Tune IZ hashing. See intel_device_info_runtime_init() */
 981        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
 982                            GEN9_IZ_HASHING_MASK(2) |
 983                            GEN9_IZ_HASHING_MASK(1) |
 984                            GEN9_IZ_HASHING_MASK(0),
 985                            GEN9_IZ_HASHING(2, vals[2]) |
 986                            GEN9_IZ_HASHING(1, vals[1]) |
 987                            GEN9_IZ_HASHING(0, vals[0]));
 988
 989        return 0;
 990}
 991
 992static int skl_init_workarounds(struct intel_engine_cs *engine)
 993{
 994        struct drm_i915_private *dev_priv = engine->i915;
 995        int ret;
 996
 997        ret = gen9_init_workarounds(engine);
 998        if (ret)
 999                return ret;
1000
1001        /*
1002         * Actual WA is to disable percontext preemption granularity control
1003         * until D0 which is the default case so this is equivalent to
1004         * !WaDisablePerCtxtPreemptionGranularityControl:skl
1005         */
1006        if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1007                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1008                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1009        }
1010
1011        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
1012                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1013                I915_WRITE(FF_SLICE_CS_CHICKEN2,
1014                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
1015        }
1016
1017        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1018         * involving this register should also be added to WA batch as required.
1019         */
1020        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1021                /* WaDisableLSQCROPERFforOCL:skl */
1022                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1023                           GEN8_LQSC_RO_PERF_DIS);
1024
1025        /* WaEnableGapsTsvCreditFix:skl */
1026        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1027                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1028                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
1029        }
1030
1031        /* WaDisablePowerCompilerClockGating:skl */
1032        if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1033                WA_SET_BIT_MASKED(HIZ_CHICKEN,
1034                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1035
1036        /* WaBarrierPerformanceFixDisable:skl */
1037        if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1038                WA_SET_BIT_MASKED(HDC_CHICKEN0,
1039                                  HDC_FENCE_DEST_SLM_DISABLE |
1040                                  HDC_BARRIER_PERFORMANCE_DISABLE);
1041
1042        /* WaDisableSbeCacheDispatchPortSharing:skl */
1043        if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1044                WA_SET_BIT_MASKED(
1045                        GEN7_HALF_SLICE_CHICKEN1,
1046                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1047
1048        /* WaDisableGafsUnitClkGating:skl */
1049        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1050
1051        /* WaInPlaceDecompressionHang:skl */
1052        if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
1053                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1054                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1055
1056        /* WaDisableLSQCROPERFforOCL:skl */
1057        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1058        if (ret)
1059                return ret;
1060
1061        return skl_tune_iz_hashing(engine);
1062}
1063
1064static int bxt_init_workarounds(struct intel_engine_cs *engine)
1065{
1066        struct drm_i915_private *dev_priv = engine->i915;
1067        int ret;
1068
1069        ret = gen9_init_workarounds(engine);
1070        if (ret)
1071                return ret;
1072
1073        /* WaStoreMultiplePTEenable:bxt */
1074        /* This is a requirement according to Hardware specification */
1075        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1076                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1077
1078        /* WaSetClckGatingDisableMedia:bxt */
1079        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1080                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1081                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1082        }
1083
1084        /* WaDisableThreadStallDopClockGating:bxt */
1085        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1086                          STALL_DOP_GATING_DISABLE);
1087
1088        /* WaDisablePooledEuLoadBalancingFix:bxt */
1089        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1090                WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1091                                  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1092        }
1093
1094        /* WaDisableSbeCacheDispatchPortSharing:bxt */
1095        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1096                WA_SET_BIT_MASKED(
1097                        GEN7_HALF_SLICE_CHICKEN1,
1098                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1099        }
1100
1101        /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1102        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1103        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1104        /* WaDisableLSQCROPERFforOCL:bxt */
1105        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1106                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1107                if (ret)
1108                        return ret;
1109
1110                ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1111                if (ret)
1112                        return ret;
1113        }
1114
1115        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1116        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1117                I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1118                                           L3_HIGH_PRIO_CREDITS(2));
1119
1120        /* WaToEnableHwFixForPushConstHWBug:bxt */
1121        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1122                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1123                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1124
1125        /* WaInPlaceDecompressionHang:bxt */
1126        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1127                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1128                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1129
1130        return 0;
1131}
1132
1133static int kbl_init_workarounds(struct intel_engine_cs *engine)
1134{
1135        struct drm_i915_private *dev_priv = engine->i915;
1136        int ret;
1137
1138        ret = gen9_init_workarounds(engine);
1139        if (ret)
1140                return ret;
1141
1142        /* WaEnableGapsTsvCreditFix:kbl */
1143        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1144                                   GEN9_GAPS_TSV_CREDIT_DISABLE));
1145
1146        /* WaDisableDynamicCreditSharing:kbl */
1147        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1148                WA_SET_BIT(GAMT_CHKN_BIT_REG,
1149                           GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1150
1151        /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1152        if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1153                WA_SET_BIT_MASKED(HDC_CHICKEN0,
1154                                  HDC_FENCE_DEST_SLM_DISABLE);
1155
1156        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1157         * involving this register should also be added to WA batch as required.
1158         */
1159        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1160                /* WaDisableLSQCROPERFforOCL:kbl */
1161                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1162                           GEN8_LQSC_RO_PERF_DIS);
1163
1164        /* WaToEnableHwFixForPushConstHWBug:kbl */
1165        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1166                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1167                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1168
1169        /* WaDisableGafsUnitClkGating:kbl */
1170        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1171
1172        /* WaDisableSbeCacheDispatchPortSharing:kbl */
1173        WA_SET_BIT_MASKED(
1174                GEN7_HALF_SLICE_CHICKEN1,
1175                GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1176
1177        /* WaInPlaceDecompressionHang:kbl */
1178        WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1179                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1180
1181        /* WaDisableLSQCROPERFforOCL:kbl */
1182        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1183        if (ret)
1184                return ret;
1185
1186        return 0;
1187}
1188
1189int init_workarounds_ring(struct intel_engine_cs *engine)
1190{
1191        struct drm_i915_private *dev_priv = engine->i915;
1192
1193        WARN_ON(engine->id != RCS);
1194
1195        dev_priv->workarounds.count = 0;
1196        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1197
1198        if (IS_BROADWELL(dev_priv))
1199                return bdw_init_workarounds(engine);
1200
1201        if (IS_CHERRYVIEW(dev_priv))
1202                return chv_init_workarounds(engine);
1203
1204        if (IS_SKYLAKE(dev_priv))
1205                return skl_init_workarounds(engine);
1206
1207        if (IS_BROXTON(dev_priv))
1208                return bxt_init_workarounds(engine);
1209
1210        if (IS_KABYLAKE(dev_priv))
1211                return kbl_init_workarounds(engine);
1212
1213        return 0;
1214}
1215
1216static int init_render_ring(struct intel_engine_cs *engine)
1217{
1218        struct drm_i915_private *dev_priv = engine->i915;
1219        int ret = init_ring_common(engine);
1220        if (ret)
1221                return ret;
1222
1223        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1224        if (IS_GEN(dev_priv, 4, 6))
1225                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1226
1227        /* We need to disable the AsyncFlip performance optimisations in order
1228         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1229         * programmed to '1' on all products.
1230         *
1231         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1232         */
1233        if (IS_GEN(dev_priv, 6, 7))
1234                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1235
1236        /* Required for the hardware to program scanline values for waiting */
1237        /* WaEnableFlushTlbInvalidationMode:snb */
1238        if (IS_GEN6(dev_priv))
1239                I915_WRITE(GFX_MODE,
1240                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1241
1242        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1243        if (IS_GEN7(dev_priv))
1244                I915_WRITE(GFX_MODE_GEN7,
1245                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1246                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1247
1248        if (IS_GEN6(dev_priv)) {
1249                /* From the Sandybridge PRM, volume 1 part 3, page 24:
1250                 * "If this bit is set, STCunit will have LRA as replacement
1251                 *  policy. [...] This bit must be reset.  LRA replacement
1252                 *  policy is not supported."
1253                 */
1254                I915_WRITE(CACHE_MODE_0,
1255                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1256        }
1257
1258        if (IS_GEN(dev_priv, 6, 7))
1259                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1260
1261        if (INTEL_INFO(dev_priv)->gen >= 6)
1262                I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1263
1264        return init_workarounds_ring(engine);
1265}
1266
1267static void render_ring_cleanup(struct intel_engine_cs *engine)
1268{
1269        struct drm_i915_private *dev_priv = engine->i915;
1270
1271        i915_vma_unpin_and_release(&dev_priv->semaphore);
1272}
1273
1274static int gen8_rcs_signal(struct drm_i915_gem_request *req)
1275{
1276        struct intel_ring *ring = req->ring;
1277        struct drm_i915_private *dev_priv = req->i915;
1278        struct intel_engine_cs *waiter;
1279        enum intel_engine_id id;
1280        int ret, num_rings;
1281
1282        num_rings = INTEL_INFO(dev_priv)->num_rings;
1283        ret = intel_ring_begin(req, (num_rings-1) * 8);
1284        if (ret)
1285                return ret;
1286
1287        for_each_engine_id(waiter, dev_priv, id) {
1288                u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1289                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1290                        continue;
1291
1292                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1293                intel_ring_emit(ring,
1294                                PIPE_CONTROL_GLOBAL_GTT_IVB |
1295                                PIPE_CONTROL_QW_WRITE |
1296                                PIPE_CONTROL_CS_STALL);
1297                intel_ring_emit(ring, lower_32_bits(gtt_offset));
1298                intel_ring_emit(ring, upper_32_bits(gtt_offset));
1299                intel_ring_emit(ring, req->fence.seqno);
1300                intel_ring_emit(ring, 0);
1301                intel_ring_emit(ring,
1302                                MI_SEMAPHORE_SIGNAL |
1303                                MI_SEMAPHORE_TARGET(waiter->hw_id));
1304                intel_ring_emit(ring, 0);
1305        }
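             /*
              * Each iteration above emits exactly 8 dwords, matching the
              * (num_rings - 1) * 8 reservation made by intel_ring_begin().
              */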
1306        intel_ring_advance(ring);
1307
1308        return 0;
1309}
1310
1311static int gen8_xcs_signal(struct drm_i915_gem_request *req)
1312{
1313        struct intel_ring *ring = req->ring;
1314        struct drm_i915_private *dev_priv = req->i915;
1315        struct intel_engine_cs *waiter;
1316        enum intel_engine_id id;
1317        int ret, num_rings;
1318
1319        num_rings = INTEL_INFO(dev_priv)->num_rings;
1320        ret = intel_ring_begin(req, (num_rings-1) * 6);
1321        if (ret)
1322                return ret;
1323
1324        for_each_engine_id(waiter, dev_priv, id) {
1325                u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1326                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1327                        continue;
1328
1329                intel_ring_emit(ring,
1330                                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1331                intel_ring_emit(ring,
1332                                lower_32_bits(gtt_offset) |
1333                                MI_FLUSH_DW_USE_GTT);
1334                intel_ring_emit(ring, upper_32_bits(gtt_offset));
1335                intel_ring_emit(ring, req->fence.seqno);
1336                intel_ring_emit(ring,
1337                                MI_SEMAPHORE_SIGNAL |
1338                                MI_SEMAPHORE_TARGET(waiter->hw_id));
1339                intel_ring_emit(ring, 0);
1340        }
1341        intel_ring_advance(ring);
1342
1343        return 0;
1344}
1345
1346static int gen6_signal(struct drm_i915_gem_request *req)
1347{
1348        struct intel_ring *ring = req->ring;
1349        struct drm_i915_private *dev_priv = req->i915;
1350        struct intel_engine_cs *engine;
1351        int ret, num_rings;
1352
1353        num_rings = INTEL_INFO(dev_priv)->num_rings;
1354        ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
1355        if (ret)
1356                return ret;
1357
1358        for_each_engine(engine, dev_priv) {
1359                i915_reg_t mbox_reg;
1360
1361                if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1362                        continue;
1363
1364                mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
1365                if (i915_mmio_reg_valid(mbox_reg)) {
1366                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1367                        intel_ring_emit_reg(ring, mbox_reg);
1368                        intel_ring_emit(ring, req->fence.seqno);
1369                }
1370        }
1371
1372        /* If num_dwords was rounded, make sure the tail pointer is correct */
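             /*
              * E.g. (illustrative) with 4 rings, (4 - 1) * 3 = 9 dwords were
              * requested, rounded up to 10, so one MI_NOOP pads the emission
              * to an even dword count.
              */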
1373        if (num_rings % 2 == 0)
1374                intel_ring_emit(ring, MI_NOOP);
1375        intel_ring_advance(ring);
1376
1377        return 0;
1378}
1379
1380static void i9xx_submit_request(struct drm_i915_gem_request *request)
1381{
1382        struct drm_i915_private *dev_priv = request->i915;
1383
1384        I915_WRITE_TAIL(request->engine,
1385                        intel_ring_offset(request->ring, request->tail));
1386}
1387
1388static int i9xx_emit_request(struct drm_i915_gem_request *req)
1389{
1390        struct intel_ring *ring = req->ring;
1391        int ret;
1392
1393        ret = intel_ring_begin(req, 4);
1394        if (ret)
1395                return ret;
1396
1397        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1398        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1399        intel_ring_emit(ring, req->fence.seqno);
1400        intel_ring_emit(ring, MI_USER_INTERRUPT);
1401        intel_ring_advance(ring);
1402
1403        req->tail = ring->tail;
1404
1405        return 0;
1406}
1407
1408/**
1409 * gen6_sema_emit_request - Update the semaphore mailbox registers
1410 *
1411 * @req: request to write to the ring
1412 *
1413 * Update the mailbox registers in the *other* rings with the current seqno.
1414 * This acts like a signal in the canonical semaphore.
1415 */
1416static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
1417{
1418        int ret;
1419
1420        ret = req->engine->semaphore.signal(req);
1421        if (ret)
1422                return ret;
1423
1424        return i9xx_emit_request(req);
1425}
1426
1427static int gen8_render_emit_request(struct drm_i915_gem_request *req)
1428{
1429        struct intel_engine_cs *engine = req->engine;
1430        struct intel_ring *ring = req->ring;
1431        int ret;
1432
1433        if (engine->semaphore.signal) {
1434                ret = engine->semaphore.signal(req);
1435                if (ret)
1436                        return ret;
1437        }
1438
1439        ret = intel_ring_begin(req, 8);
1440        if (ret)
1441                return ret;
1442
1443        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1444        intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1445                               PIPE_CONTROL_CS_STALL |
1446                               PIPE_CONTROL_QW_WRITE));
1447        intel_ring_emit(ring, intel_hws_seqno_address(engine));
1448        intel_ring_emit(ring, 0);
1449        intel_ring_emit(ring, i915_gem_request_get_seqno(req));
1450        /* We're thrashing one dword of HWS. */
1451        intel_ring_emit(ring, 0);
1452        intel_ring_emit(ring, MI_USER_INTERRUPT);
1453        intel_ring_emit(ring, MI_NOOP);
1454        intel_ring_advance(ring);
1455
1456        req->tail = ring->tail;
1457
1458        return 0;
1459}
1460
1461/**
1462 * gen8_ring_sync_to - make the waiter block until the signaller's seqno
1463 *
1464 * @req: request that is waiting (the waiter)
1465 * @signal: request that has emitted, or will emit, the awaited seqno
1466 *
1467 */
1468
1469static int
1470gen8_ring_sync_to(struct drm_i915_gem_request *req,
1471                  struct drm_i915_gem_request *signal)
1472{
1473        struct intel_ring *ring = req->ring;
1474        struct drm_i915_private *dev_priv = req->i915;
1475        u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
1476        struct i915_hw_ppgtt *ppgtt;
1477        int ret;
1478
1479        ret = intel_ring_begin(req, 4);
1480        if (ret)
1481                return ret;
1482
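            /* Poll the per-engine semaphore slot in the GGTT until it holds
             * a value >= the signaller's seqno (SAD_GTE_SDD comparison).
             */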
1483        intel_ring_emit(ring,
1484                        MI_SEMAPHORE_WAIT |
1485                        MI_SEMAPHORE_GLOBAL_GTT |
1486                        MI_SEMAPHORE_SAD_GTE_SDD);
1487        intel_ring_emit(ring, signal->fence.seqno);
1488        intel_ring_emit(ring, lower_32_bits(offset));
1489        intel_ring_emit(ring, upper_32_bits(offset));
1490        intel_ring_advance(ring);
1491
1492        /* When the !RCS engines idle waiting upon a semaphore, they lose their
1493         * pagetables and we must reload them before executing the batch.
1494         * We do this on the i915_switch_context() following the wait and
1495         * before the dispatch.
1496         */
1497        ppgtt = req->ctx->ppgtt;
1498        if (ppgtt && req->engine->id != RCS)
1499                ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
1500        return 0;
1501}
1502
1503static int
1504gen6_ring_sync_to(struct drm_i915_gem_request *req,
1505                  struct drm_i915_gem_request *signal)
1506{
1507        struct intel_ring *ring = req->ring;
1508        u32 dw1 = MI_SEMAPHORE_MBOX |
1509                  MI_SEMAPHORE_COMPARE |
1510                  MI_SEMAPHORE_REGISTER;
1511        u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
1512        int ret;
1513
1514        WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1515
1516        ret = intel_ring_begin(req, 4);
1517        if (ret)
1518                return ret;
1519
1520        intel_ring_emit(ring, dw1 | wait_mbox);
1521        /* Throughout the GEM code, a seqno having been passed implies that
1522         * the current seqno is >= that value. The hardware comparison,
1523         * however, is strictly greater-than, so we wait upon seqno - 1.
1524         */
1525        intel_ring_emit(ring, signal->fence.seqno - 1);
1526        intel_ring_emit(ring, 0);
1527        intel_ring_emit(ring, MI_NOOP);
1528        intel_ring_advance(ring);
1529
1530        return 0;
1531}
1532
1533static void
1534gen5_seqno_barrier(struct intel_engine_cs *engine)
1535{
1536        /* MI_STORE writes are internally buffered by the GPU and not flushed
1537         * either by MI_FLUSH or SyncFlush or any other combination of
1538         * MI commands.
1539         *
1540         * "Only the submission of the store operation is guaranteed.
1541         * The write result will be complete (coherent) some time later
1542         * (this is practically a finite period but there is no guaranteed
1543         * latency)."
1544         *
1545         * Empirically, we observe that we need a delay of at least 75us to
1546         * be sure that the seqno write is visible to the CPU.
1547         */
1548        usleep_range(125, 250);
1549}
1550
1551static void
1552gen6_seqno_barrier(struct intel_engine_cs *engine)
1553{
1554        struct drm_i915_private *dev_priv = engine->i915;
1555
1556        /* Workaround to force correct ordering between irq and seqno writes on
1557         * ivb (and maybe also on snb) by reading from a CS register (like
1558         * ACTHD) before reading the status page.
1559         *
1560         * Note that this effectively stalls the read by the time it takes to
1561         * do a memory transaction, which more or less ensures that the write
1562         * from the GPU has sufficient time to invalidate the CPU cacheline.
1563         * Alternatively we could delay the interrupt from the CS ring to give
1564         * the write time to land, but that would incur a delay after every
1565         * batch i.e. much more frequent than a delay when waiting for the
1566         * interrupt (with the same net latency).
1567         *
1568         * Also note that to prevent whole machine hangs on gen7, we have to
1569         * take the spinlock to guard against concurrent cacheline access.
1570         */
1571        spin_lock_irq(&dev_priv->uncore.lock);
1572        POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1573        spin_unlock_irq(&dev_priv->uncore.lock);
1574}
1575
1576static void
1577gen5_irq_enable(struct intel_engine_cs *engine)
1578{
1579        gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1580}
1581
1582static void
1583gen5_irq_disable(struct intel_engine_cs *engine)
1584{
1585        gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1586}
1587
1588static void
1589i9xx_irq_enable(struct intel_engine_cs *engine)
1590{
1591        struct drm_i915_private *dev_priv = engine->i915;
1592
1593        dev_priv->irq_mask &= ~engine->irq_enable_mask;
1594        I915_WRITE(IMR, dev_priv->irq_mask);
1595        POSTING_READ_FW(RING_IMR(engine->mmio_base));
1596}
1597
1598static void
1599i9xx_irq_disable(struct intel_engine_cs *engine)
1600{
1601        struct drm_i915_private *dev_priv = engine->i915;
1602
1603        dev_priv->irq_mask |= engine->irq_enable_mask;
1604        I915_WRITE(IMR, dev_priv->irq_mask);
1605}
1606
1607static void
1608i8xx_irq_enable(struct intel_engine_cs *engine)
1609{
1610        struct drm_i915_private *dev_priv = engine->i915;
1611
1612        dev_priv->irq_mask &= ~engine->irq_enable_mask;
1613        I915_WRITE16(IMR, dev_priv->irq_mask);
1614        POSTING_READ16(RING_IMR(engine->mmio_base));
1615}
1616
1617static void
1618i8xx_irq_disable(struct intel_engine_cs *engine)
1619{
1620        struct drm_i915_private *dev_priv = engine->i915;
1621
1622        dev_priv->irq_mask |= engine->irq_enable_mask;
1623        I915_WRITE16(IMR, dev_priv->irq_mask);
1624}
1625
1626static int
1627bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1628{
1629        struct intel_ring *ring = req->ring;
1630        int ret;
1631
1632        ret = intel_ring_begin(req, 2);
1633        if (ret)
1634                return ret;
1635
1636        intel_ring_emit(ring, MI_FLUSH);
1637        intel_ring_emit(ring, MI_NOOP);
1638        intel_ring_advance(ring);
1639        return 0;
1640}
1641
1642static void
1643gen6_irq_enable(struct intel_engine_cs *engine)
1644{
1645        struct drm_i915_private *dev_priv = engine->i915;
1646
1647        I915_WRITE_IMR(engine,
1648                       ~(engine->irq_enable_mask |
1649                         engine->irq_keep_mask));
1650        gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1651}
1652
1653static void
1654gen6_irq_disable(struct intel_engine_cs *engine)
1655{
1656        struct drm_i915_private *dev_priv = engine->i915;
1657
1658        I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1659        gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1660}
1661
1662static void
1663hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1664{
1665        struct drm_i915_private *dev_priv = engine->i915;
1666
1667        I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1668        gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1669}
1670
1671static void
1672hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1673{
1674        struct drm_i915_private *dev_priv = engine->i915;
1675
1676        I915_WRITE_IMR(engine, ~0);
1677        gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1678}
1679
1680static void
1681gen8_irq_enable(struct intel_engine_cs *engine)
1682{
1683        struct drm_i915_private *dev_priv = engine->i915;
1684
1685        I915_WRITE_IMR(engine,
1686                       ~(engine->irq_enable_mask |
1687                         engine->irq_keep_mask));
1688        POSTING_READ_FW(RING_IMR(engine->mmio_base));
1689}
1690
1691static void
1692gen8_irq_disable(struct intel_engine_cs *engine)
1693{
1694        struct drm_i915_private *dev_priv = engine->i915;
1695
1696        I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1697}
1698
1699static int
1700i965_emit_bb_start(struct drm_i915_gem_request *req,
1701                   u64 offset, u32 length,
1702                   unsigned int dispatch_flags)
1703{
1704        struct intel_ring *ring = req->ring;
1705        int ret;
1706
1707        ret = intel_ring_begin(req, 2);
1708        if (ret)
1709                return ret;
1710
1711        intel_ring_emit(ring,
1712                        MI_BATCH_BUFFER_START |
1713                        MI_BATCH_GTT |
1714                        (dispatch_flags & I915_DISPATCH_SECURE ?
1715                         0 : MI_BATCH_NON_SECURE_I965));
1716        intel_ring_emit(ring, offset);
1717        intel_ring_advance(ring);
1718
1719        return 0;
1720}
1721
1722/* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
1723#define I830_BATCH_LIMIT (256*1024)
1724#define I830_TLB_ENTRIES (2)
1725#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1726static int
1727i830_emit_bb_start(struct drm_i915_gem_request *req,
1728                   u64 offset, u32 len,
1729                   unsigned int dispatch_flags)
1730{
1731        struct intel_ring *ring = req->ring;
1732        u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
1733        int ret;
1734
1735        ret = intel_ring_begin(req, 6);
1736        if (ret)
1737                return ret;
1738
1739        /* Evict the invalid PTE TLBs */
1740        intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1741        intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1742        intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1743        intel_ring_emit(ring, cs_offset);
1744        intel_ring_emit(ring, 0xdeadbeef);
1745        intel_ring_emit(ring, MI_NOOP);
1746        intel_ring_advance(ring);
1747
1748        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1749                if (len > I830_BATCH_LIMIT)
1750                        return -ENOSPC;
1751
1752                ret = intel_ring_begin(req, 6 + 2);
1753                if (ret)
1754                        return ret;
1755
1756                /* Blit the batch (which now has all relocs applied) to the
1757                 * stable batch scratch bo area (so that the CS never
1758                 * stumbles over its tlb invalidation bug) ...
1759                 */
1760                intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1761                intel_ring_emit(ring,
1762                                BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1763                intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1764                intel_ring_emit(ring, cs_offset);
1765                intel_ring_emit(ring, 4096);
1766                intel_ring_emit(ring, offset);
1767
1768                intel_ring_emit(ring, MI_FLUSH);
1769                intel_ring_emit(ring, MI_NOOP);
1770                intel_ring_advance(ring);
1771
1772                /* ... and execute it. */
1773                offset = cs_offset;
1774        }
1775
1776        ret = intel_ring_begin(req, 2);
1777        if (ret)
1778                return ret;
1779
1780        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1781        intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1782                                        0 : MI_BATCH_NON_SECURE));
1783        intel_ring_advance(ring);
1784
1785        return 0;
1786}
1787
1788static int
1789i915_emit_bb_start(struct drm_i915_gem_request *req,
1790                   u64 offset, u32 len,
1791                   unsigned int dispatch_flags)
1792{
1793        struct intel_ring *ring = req->ring;
1794        int ret;
1795
1796        ret = intel_ring_begin(req, 2);
1797        if (ret)
1798                return ret;
1799
1800        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1801        intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1802                                        0 : MI_BATCH_NON_SECURE));
1803        intel_ring_advance(ring);
1804
1805        return 0;
1806}
1807
1808static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1809{
1810        struct drm_i915_private *dev_priv = engine->i915;
1811
1812        if (!dev_priv->status_page_dmah)
1813                return;
1814
1815        drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1816        engine->status_page.page_addr = NULL;
1817}
1818
1819static void cleanup_status_page(struct intel_engine_cs *engine)
1820{
1821        struct i915_vma *vma;
1822
1823        vma = fetch_and_zero(&engine->status_page.vma);
1824        if (!vma)
1825                return;
1826
1827        i915_vma_unpin(vma);
1828        i915_gem_object_unpin_map(vma->obj);
1829        i915_vma_put(vma);
1830}
1831
1832static int init_status_page(struct intel_engine_cs *engine)
1833{
1834        struct drm_i915_gem_object *obj;
1835        struct i915_vma *vma;
1836        unsigned int flags;
1837        int ret;
1838
1839        obj = i915_gem_object_create(&engine->i915->drm, 4096);
1840        if (IS_ERR(obj)) {
1841                DRM_ERROR("Failed to allocate status page\n");
1842                return PTR_ERR(obj);
1843        }
1844
1845        ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1846        if (ret)
1847                goto err;
1848
1849        vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1850        if (IS_ERR(vma)) {
1851                ret = PTR_ERR(vma);
1852                goto err;
1853        }
1854
1855        flags = PIN_GLOBAL;
1856        if (!HAS_LLC(engine->i915))
1857                /* On g33, we cannot place HWS above 256MiB, so
1858                 * restrict its pinning to the low mappable arena.
1859                 * Though this restriction is not documented for
1860                 * gen4, gen5, or byt, they also behave similarly
1861                 * and hang if the HWS is placed at the top of the
1862                 * GTT. To generalise, it appears that all !llc
1863                 * platforms have issues with us placing the HWS
1864                 * above the mappable region (even though we never
1865                 * actually map it).
1866                 */
1867                flags |= PIN_MAPPABLE;
1868        ret = i915_vma_pin(vma, 0, 4096, flags);
1869        if (ret)
1870                goto err;
1871
1872        engine->status_page.vma = vma;
1873        engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1874        engine->status_page.page_addr =
1875                i915_gem_object_pin_map(obj, I915_MAP_WB);
1876
1877        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1878                         engine->name, i915_ggtt_offset(vma));
1879        return 0;
1880
1881err:
1882        i915_gem_object_put(obj);
1883        return ret;
1884}
1885
1886static int init_phys_status_page(struct intel_engine_cs *engine)
1887{
1888        struct drm_i915_private *dev_priv = engine->i915;
1889
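            /* Allocate a page of coherent DMA memory to back the status page
             * on platforms that address the HWS physically.
             */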
1890        dev_priv->status_page_dmah =
1891                drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1892        if (!dev_priv->status_page_dmah)
1893                return -ENOMEM;
1894
1895        engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1896        memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1897
1898        return 0;
1899}
1900
1901int intel_ring_pin(struct intel_ring *ring)
1902{
1903        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1904        unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
1905        enum i915_map_type map;
1906        struct i915_vma *vma = ring->vma;
1907        void *addr;
1908        int ret;
1909
1910        GEM_BUG_ON(ring->vaddr);
1911
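            /* With LLC the ring can be mapped write-back through the CPU
             * cache; without it we must use a write-combined mapping.
             */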
1912        map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1913
1914        if (vma->obj->stolen)
1915                flags |= PIN_MAPPABLE;
1916
1917        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1918                if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1919                        ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1920                else
1921                        ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1922                if (unlikely(ret))
1923                        return ret;
1924        }
1925
1926        ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1927        if (unlikely(ret))
1928                return ret;
1929
1930        if (i915_vma_is_map_and_fenceable(vma))
1931                addr = (void __force *)i915_vma_pin_iomap(vma);
1932        else
1933                addr = i915_gem_object_pin_map(vma->obj, map);
1934        if (IS_ERR(addr))
1935                goto err;
1936
1937        ring->vaddr = addr;
1938        return 0;
1939
1940err:
1941        i915_vma_unpin(vma);
1942        return PTR_ERR(addr);
1943}
1944
1945void intel_ring_unpin(struct intel_ring *ring)
1946{
1947        GEM_BUG_ON(!ring->vma);
1948        GEM_BUG_ON(!ring->vaddr);
1949
1950        if (i915_vma_is_map_and_fenceable(ring->vma))
1951                i915_vma_unpin_iomap(ring->vma);
1952        else
1953                i915_gem_object_unpin_map(ring->vma->obj);
1954        ring->vaddr = NULL;
1955
1956        i915_vma_unpin(ring->vma);
1957}
1958
1959static struct i915_vma *
1960intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1961{
1962        struct drm_i915_gem_object *obj;
1963        struct i915_vma *vma;
1964
1965        obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1966        if (!obj)
1967                obj = i915_gem_object_create(&dev_priv->drm, size);
1968        if (IS_ERR(obj))
1969                return ERR_CAST(obj);
1970
1971        /* mark ring buffers as read-only from GPU side by default */
1972        obj->gt_ro = 1;
1973
1974        vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
1975        if (IS_ERR(vma))
1976                goto err;
1977
1978        return vma;
1979
1980err:
1981        i915_gem_object_put(obj);
1982        return vma;
1983}
1984
1985struct intel_ring *
1986intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1987{
1988        struct intel_ring *ring;
1989        struct i915_vma *vma;
1990
1991        GEM_BUG_ON(!is_power_of_2(size));
1992
1993        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1994        if (!ring)
1995                return ERR_PTR(-ENOMEM);
1996
1997        ring->engine = engine;
1998
1999        INIT_LIST_HEAD(&ring->request_list);
2000
2001        ring->size = size;
2002        /* Workaround an erratum on the i830 which causes a hang if
2003         * the TAIL pointer points to within the last 2 cachelines
2004         * of the buffer.
2005         */
2006        ring->effective_size = size;
2007        if (IS_I830(engine->i915) || IS_845G(engine->i915))
2008                ring->effective_size -= 2 * CACHELINE_BYTES;
2009
2010        ring->last_retired_head = -1;
2011        intel_ring_update_space(ring);
2012
2013        vma = intel_ring_create_vma(engine->i915, size);
2014        if (IS_ERR(vma)) {
2015                kfree(ring);
2016                return ERR_CAST(vma);
2017        }
2018        ring->vma = vma;
2019
2020        return ring;
2021}
2022
2023void
2024intel_ring_free(struct intel_ring *ring)
2025{
2026        i915_vma_put(ring->vma);
2027        kfree(ring);
2028}
2029
2030static int intel_ring_context_pin(struct i915_gem_context *ctx,
2031                                  struct intel_engine_cs *engine)
2032{
2033        struct intel_context *ce = &ctx->engine[engine->id];
2034        int ret;
2035
2036        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2037
2038        if (ce->pin_count++)
2039                return 0;
2040
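            /* If the context carries a hardware state object, pin it high in
             * the global GTT before the engine can switch to it.
             */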
2041        if (ce->state) {
2042                ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
2043                if (ret)
2044                        goto error;
2045
2046                ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
2047                                   PIN_GLOBAL | PIN_HIGH);
2048                if (ret)
2049                        goto error;
2050        }
2051
2052        /* The kernel context is only used as a placeholder for flushing the
2053         * active context. It is never used for submitting user rendering and
2054         * as such never requires the golden render context, and so we can skip
2055         * emitting it when we switch to the kernel context. This is required
2056         * as during eviction we cannot allocate and pin the renderstate in
2057         * order to initialise the context.
2058         */
2059        if (ctx == ctx->i915->kernel_context)
2060                ce->initialised = true;
2061
2062        i915_gem_context_get(ctx);
2063        return 0;
2064
2065error:
2066        ce->pin_count = 0;
2067        return ret;
2068}
2069
2070static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2071                                     struct intel_engine_cs *engine)
2072{
2073        struct intel_context *ce = &ctx->engine[engine->id];
2074
2075        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2076
2077        if (--ce->pin_count)
2078                return;
2079
2080        if (ce->state)
2081                i915_vma_unpin(ce->state);
2082
2083        i915_gem_context_put(ctx);
2084}
2085
2086static int intel_init_ring_buffer(struct intel_engine_cs *engine)
2087{
2088        struct drm_i915_private *dev_priv = engine->i915;
2089        struct intel_ring *ring;
2090        int ret;
2091
2092        WARN_ON(engine->buffer);
2093
2094        intel_engine_setup_common(engine);
2095
2096        memset(engine->semaphore.sync_seqno, 0,
2097               sizeof(engine->semaphore.sync_seqno));
2098
2099        ret = intel_engine_init_common(engine);
2100        if (ret)
2101                goto error;
2102
2103        /* We may need to do things with the shrinker which
2104         * require us to immediately switch back to the default
2105         * context. This can cause a problem as pinning the
2106         * default context also requires GTT space which may not
2107         * be available. To avoid this we always pin the default
2108         * context.
2109         */
2110        ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2111        if (ret)
2112                goto error;
2113
2114        ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2115        if (IS_ERR(ring)) {
2116                ret = PTR_ERR(ring);
2117                goto error;
2118        }
2119
2120        if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2121                WARN_ON(engine->id != RCS);
2122                ret = init_phys_status_page(engine);
2123                if (ret)
2124                        goto error;
2125        } else {
2126                ret = init_status_page(engine);
2127                if (ret)
2128                        goto error;
2129        }
2130
2131        ret = intel_ring_pin(ring);
2132        if (ret) {
2133                intel_ring_free(ring);
2134                goto error;
2135        }
2136        engine->buffer = ring;
2137
2138        return 0;
2139
2140error:
2141        intel_engine_cleanup(engine);
2142        return ret;
2143}
2144
2145void intel_engine_cleanup(struct intel_engine_cs *engine)
2146{
2147        struct drm_i915_private *dev_priv;
2148
2149        if (!intel_engine_initialized(engine))
2150                return;
2151
2152        dev_priv = engine->i915;
2153
2154        if (engine->buffer) {
2155                WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2156                        (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2157
2158                intel_ring_unpin(engine->buffer);
2159                intel_ring_free(engine->buffer);
2160                engine->buffer = NULL;
2161        }
2162
2163        if (engine->cleanup)
2164                engine->cleanup(engine);
2165
2166        if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2167                WARN_ON(engine->id != RCS);
2168                cleanup_phys_status_page(engine);
2169        } else {
2170                cleanup_status_page(engine);
2171        }
2172
2173        intel_engine_cleanup_common(engine);
2174
2175        intel_ring_context_unpin(dev_priv->kernel_context, engine);
2176
2177        engine->i915 = NULL;
2178}
2179
2180void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2181{
2182        struct intel_engine_cs *engine;
2183
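            /* The rings are empty again after resume, so restart our
             * bookkeeping with head == tail and no stale retired position.
             */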
2184        for_each_engine(engine, dev_priv) {
2185                engine->buffer->head = engine->buffer->tail;
2186                engine->buffer->last_retired_head = -1;
2187        }
2188}
2189
2190int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2191{
2192        int ret;
2193
2194        /* Flush enough space to reduce the likelihood of waiting after
2195         * we start building the request - in which case we will just
2196         * have to repeat work.
2197         */
2198        request->reserved_space += LEGACY_REQUEST_SIZE;
2199
2200        request->ring = request->engine->buffer;
2201
2202        ret = intel_ring_begin(request, 0);
2203        if (ret)
2204                return ret;
2205
2206        request->reserved_space -= LEGACY_REQUEST_SIZE;
2207        return 0;
2208}
2209
2210static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2211{
2212        struct intel_ring *ring = req->ring;
2213        struct drm_i915_gem_request *target;
2214        int ret;
2215
2216        intel_ring_update_space(ring);
2217        if (ring->space >= bytes)
2218                return 0;
2219
2220        /*
2221         * Space is reserved in the ringbuffer for finalising the request,
2222         * as that cannot be allowed to fail. During request finalisation,
2223         * reserved_space is set to 0 to stop the overallocation and the
2224         * assumption is that then we never need to wait (which has the
2225         * risk of failing with EINTR).
2226         *
2227         * See also i915_gem_request_alloc() and i915_add_request().
2228         */
2229        GEM_BUG_ON(!req->reserved_space);
2230
2231        list_for_each_entry(target, &ring->request_list, ring_link) {
2232                unsigned space;
2233
2234                /* Would completion of this request free enough space? */
2235                space = __intel_ring_space(target->postfix, ring->tail,
2236                                           ring->size);
2237                if (space >= bytes)
2238                        break;
2239        }
2240
2241        if (WARN_ON(&target->ring_link == &ring->request_list))
2242                return -ENOSPC;
2243
2244        ret = i915_wait_request(target,
2245                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
2246                                NULL, NO_WAITBOOST);
2247        if (ret)
2248                return ret;
2249
2250        i915_gem_request_retire_upto(target);
2251
2252        intel_ring_update_space(ring);
2253        GEM_BUG_ON(ring->space < bytes);
2254        return 0;
2255}
2256
2257int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2258{
2259        struct intel_ring *ring = req->ring;
2260        int remain_actual = ring->size - ring->tail;
2261        int remain_usable = ring->effective_size - ring->tail;
2262        int bytes = num_dwords * sizeof(u32);
2263        int total_bytes, wait_bytes;
2264        bool need_wrap = false;
2265
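            /* remain_usable is limited by effective_size (which is reduced on
             * platforms affected by the i830 tail erratum), whereas
             * remain_actual is the number of bytes left before the ring
             * physically wraps.
             */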
2266        total_bytes = bytes + req->reserved_space;
2267
2268        if (unlikely(bytes > remain_usable)) {
2269                /*
2270                 * Not enough space for the basic request. So need to flush
2271                 * out the remainder and then wait for base + reserved.
2272                 */
2273                wait_bytes = remain_actual + total_bytes;
2274                need_wrap = true;
2275        } else if (unlikely(total_bytes > remain_usable)) {
2276                /*
2277                 * The base request will fit but the reserved space
2278                 * falls off the end. So we don't need an immediate wrap
2279                 * and only need to effectively wait for the reserved
2280                 * size space from the start of ringbuffer.
2281                 */
2282                wait_bytes = remain_actual + req->reserved_space;
2283        } else {
2284                /* No wrapping required, just waiting. */
2285                wait_bytes = total_bytes;
2286        }
2287
2288        if (wait_bytes > ring->space) {
2289                int ret = wait_for_space(req, wait_bytes);
2290                if (unlikely(ret))
2291                        return ret;
2292        }
2293
2294        if (unlikely(need_wrap)) {
2295                GEM_BUG_ON(remain_actual > ring->space);
2296                GEM_BUG_ON(ring->tail + remain_actual > ring->size);
2297
2298                /* Fill the remaining tail with MI_NOOP (== 0, hence the memset) */
2299                memset(ring->vaddr + ring->tail, 0, remain_actual);
2300                ring->tail = 0;
2301                ring->space -= remain_actual;
2302        }
2303
2304        ring->space -= bytes;
2305        GEM_BUG_ON(ring->space < 0);
2306        return 0;
2307}
2308
2309/* Align the ring tail to a cacheline boundary */
2310int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2311{
2312        struct intel_ring *ring = req->ring;
2313        int num_dwords =
2314                (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2315        int ret;
2316
2317        if (num_dwords == 0)
2318                return 0;
2319
2320        num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2321        ret = intel_ring_begin(req, num_dwords);
2322        if (ret)
2323                return ret;
2324
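            /* Pad out to the next cacheline boundary with MI_NOOPs */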
2325        while (num_dwords--)
2326                intel_ring_emit(ring, MI_NOOP);
2327
2328        intel_ring_advance(ring);
2329
2330        return 0;
2331}
2332
2333static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
2334{
2335        struct drm_i915_private *dev_priv = request->i915;
2336
2337        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2338
2339        /* Every tail move must follow the sequence below */
2340
2341        /* Disable notification that the ring is IDLE. The GT
2342         * will then assume that it is busy and bring it out of rc6.
2343         */
2344        I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2345                      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2346
2347        /* Clear the context id. Here be magic! */
2348        I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2349
2350        /* Wait for the ring not to be idle, i.e. for it to wake up. */
2351        if (intel_wait_for_register_fw(dev_priv,
2352                                       GEN6_BSD_SLEEP_PSMI_CONTROL,
2353                                       GEN6_BSD_SLEEP_INDICATOR,
2354                                       0,
2355                                       50))
2356                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2357
2358        /* Now that the ring is fully powered up, update the tail */
2359        i9xx_submit_request(request);
2360
2361        /* Let the ring send IDLE messages to the GT again,
2362         * and so let it sleep to conserve power when idle.
2363         */
2364        I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2365                      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2366
2367        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2368}
2369
2370static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2371{
2372        struct intel_ring *ring = req->ring;
2373        uint32_t cmd;
2374        int ret;
2375
2376        ret = intel_ring_begin(req, 4);
2377        if (ret)
2378                return ret;
2379
2380        cmd = MI_FLUSH_DW;
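            /* On gen8+ MI_FLUSH_DW takes a 64-bit post-sync address, so the
             * command is one dword longer.
             */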
2381        if (INTEL_GEN(req->i915) >= 8)
2382                cmd += 1;
2383
2384        /* We always require a command barrier so that subsequent
2385         * commands, such as breadcrumb interrupts, are strictly ordered
2386         * wrt the contents of the write cache being flushed to memory
2387         * (and thus being coherent from the CPU).
2388         */
2389        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2390
2391        /*
2392         * Bspec vol 1c.5 - video engine command streamer:
2393         * "If ENABLED, all TLBs will be invalidated once the flush
2394         * operation is complete. This bit is only valid when the
2395         * Post-Sync Operation field is a value of 1h or 3h."
2396         */
2397        if (mode & EMIT_INVALIDATE)
2398                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2399
2400        intel_ring_emit(ring, cmd);
2401        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2402        if (INTEL_GEN(req->i915) >= 8) {
2403                intel_ring_emit(ring, 0); /* upper addr */
2404                intel_ring_emit(ring, 0); /* value */
2405        } else  {
2406                intel_ring_emit(ring, 0);
2407                intel_ring_emit(ring, MI_NOOP);
2408        }
2409        intel_ring_advance(ring);
2410        return 0;
2411}
2412
2413static int
2414gen8_emit_bb_start(struct drm_i915_gem_request *req,
2415                   u64 offset, u32 len,
2416                   unsigned int dispatch_flags)
2417{
2418        struct intel_ring *ring = req->ring;
2419        bool ppgtt = USES_PPGTT(req->i915) &&
2420                        !(dispatch_flags & I915_DISPATCH_SECURE);
2421        int ret;
2422
2423        ret = intel_ring_begin(req, 4);
2424        if (ret)
2425                return ret;
2426
2427        /* FIXME(BDW): Address space and security selectors. */
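            /* Bit 8 of MI_BATCH_BUFFER_START selects the PPGTT address space,
             * set here for non-secure batches when PPGTT is in use.
             */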
2428        intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
2429                        (dispatch_flags & I915_DISPATCH_RS ?
2430                         MI_BATCH_RESOURCE_STREAMER : 0));
2431        intel_ring_emit(ring, lower_32_bits(offset));
2432        intel_ring_emit(ring, upper_32_bits(offset));
2433        intel_ring_emit(ring, MI_NOOP);
2434        intel_ring_advance(ring);
2435
2436        return 0;
2437}
2438
2439static int
2440hsw_emit_bb_start(struct drm_i915_gem_request *req,
2441                  u64 offset, u32 len,
2442                  unsigned int dispatch_flags)
2443{
2444        struct intel_ring *ring = req->ring;
2445        int ret;
2446
2447        ret = intel_ring_begin(req, 2);
2448        if (ret)
2449                return ret;
2450
2451        intel_ring_emit(ring,
2452                        MI_BATCH_BUFFER_START |
2453                        (dispatch_flags & I915_DISPATCH_SECURE ?
2454                         0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2455                        (dispatch_flags & I915_DISPATCH_RS ?
2456                         MI_BATCH_RESOURCE_STREAMER : 0));
2457        /* bits 0-7 carry the length on GEN6+ */
2458        intel_ring_emit(ring, offset);
2459        intel_ring_advance(ring);
2460
2461        return 0;
2462}
2463
2464static int
2465gen6_emit_bb_start(struct drm_i915_gem_request *req,
2466                   u64 offset, u32 len,
2467                   unsigned int dispatch_flags)
2468{
2469        struct intel_ring *ring = req->ring;
2470        int ret;
2471
2472        ret = intel_ring_begin(req, 2);
2473        if (ret)
2474                return ret;
2475
2476        intel_ring_emit(ring,
2477                        MI_BATCH_BUFFER_START |
2478                        (dispatch_flags & I915_DISPATCH_SECURE ?
2479                         0 : MI_BATCH_NON_SECURE_I965));
2480        /* bits 0-7 carry the length on GEN6+ */
2481        intel_ring_emit(ring, offset);
2482        intel_ring_advance(ring);
2483
2484        return 0;
2485}
2486
2487/* Blitter support (SandyBridge+) */
2488
2489static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2490{
2491        struct intel_ring *ring = req->ring;
2492        uint32_t cmd;
2493        int ret;
2494
2495        ret = intel_ring_begin(req, 4);
2496        if (ret)
2497                return ret;
2498
2499        cmd = MI_FLUSH_DW;
2500        if (INTEL_GEN(req->i915) >= 8)
2501                cmd += 1;
2502
2503        /* We always require a command barrier so that subsequent
2504         * commands, such as breadcrumb interrupts, are strictly ordered
2505         * wrt the contents of the write cache being flushed to memory
2506         * (and thus being coherent from the CPU).
2507         */
2508        cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2509
2510        /*
2511         * Bspec vol 1c.3 - blitter engine command streamer:
2512         * "If ENABLED, all TLBs will be invalidated once the flush
2513         * operation is complete. This bit is only valid when the
2514         * Post-Sync Operation field is a value of 1h or 3h."
2515         */
2516        if (mode & EMIT_INVALIDATE)
2517                cmd |= MI_INVALIDATE_TLB;
2518        intel_ring_emit(ring, cmd);
2519        intel_ring_emit(ring,
2520                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2521        if (INTEL_GEN(req->i915) >= 8) {
2522                intel_ring_emit(ring, 0); /* upper addr */
2523                intel_ring_emit(ring, 0); /* value */
2524        } else  {
2525                intel_ring_emit(ring, 0);
2526                intel_ring_emit(ring, MI_NOOP);
2527        }
2528        intel_ring_advance(ring);
2529
2530        return 0;
2531}
2532
2533static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2534                                       struct intel_engine_cs *engine)
2535{
2536        struct drm_i915_gem_object *obj;
2537        int ret, i;
2538
2539        if (!i915.semaphores)
2540                return;
2541
2542        if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2543                struct i915_vma *vma;
2544
2545                obj = i915_gem_object_create(&dev_priv->drm, 4096);
2546                if (IS_ERR(obj))
2547                        goto err;
2548
2549                vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2550                if (IS_ERR(vma))
2551                        goto err_obj;
2552
2553                ret = i915_gem_object_set_to_gtt_domain(obj, false);
2554                if (ret)
2555                        goto err_obj;
2556
2557                ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2558                if (ret)
2559                        goto err_obj;
2560
2561                dev_priv->semaphore = vma;
2562        }
2563
2564        if (INTEL_GEN(dev_priv) >= 8) {
2565                u32 offset = i915_ggtt_offset(dev_priv->semaphore);
2566
2567                engine->semaphore.sync_to = gen8_ring_sync_to;
2568                engine->semaphore.signal = gen8_xcs_signal;
2569
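                    /* Record where in the semaphore page this engine writes
                     * its seqno for each possible waiter; an engine never
                     * signals itself, so its own slot is marked invalid.
                     */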
2570                for (i = 0; i < I915_NUM_ENGINES; i++) {
2571                        u32 ring_offset;
2572
2573                        if (i != engine->id)
2574                                ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2575                        else
2576                                ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2577
2578                        engine->semaphore.signal_ggtt[i] = ring_offset;
2579                }
2580        } else if (INTEL_GEN(dev_priv) >= 6) {
2581                engine->semaphore.sync_to = gen6_ring_sync_to;
2582                engine->semaphore.signal = gen6_signal;
2583
2584                /*
2585                 * These mailbox semaphores are only used on pre-gen8
2586                 * platforms, which have no VCS2 ring, so the RCS/VCS2
2587                 * slots are left initialized as INVALID; gen8 sets up
2588                 * its semaphores (including those for VCS2) in the
2589                 * branch above instead.
2590                 */
2591                for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2592                        static const struct {
2593                                u32 wait_mbox;
2594                                i915_reg_t mbox_reg;
2595                        } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2596                                [RCS_HW] = {
2597                                        [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
2598                                        [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
2599                                        [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2600                                },
2601                                [VCS_HW] = {
2602                                        [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
2603                                        [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
2604                                        [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2605                                },
2606                                [BCS_HW] = {
2607                                        [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
2608                                        [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
2609                                        [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2610                                },
2611                                [VECS_HW] = {
2612                                        [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2613                                        [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2614                                        [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2615                                },
2616                        };
2617                        u32 wait_mbox;
2618                        i915_reg_t mbox_reg;
2619
2620                        if (i == engine->hw_id) {
2621                                wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2622                                mbox_reg = GEN6_NOSYNC;
2623                        } else {
2624                                wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2625                                mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2626                        }
2627
2628                        engine->semaphore.mbox.wait[i] = wait_mbox;
2629                        engine->semaphore.mbox.signal[i] = mbox_reg;
2630                }
2631        }
2632
2633        return;
2634
2635err_obj:
2636        i915_gem_object_put(obj);
2637err:
2638        DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2639        i915.semaphores = 0;
2640}
2641
2642static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2643                                struct intel_engine_cs *engine)
2644{
2645        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2646
2647        if (INTEL_GEN(dev_priv) >= 8) {
2648                engine->irq_enable = gen8_irq_enable;
2649                engine->irq_disable = gen8_irq_disable;
2650                engine->irq_seqno_barrier = gen6_seqno_barrier;
2651        } else if (INTEL_GEN(dev_priv) >= 6) {
2652                engine->irq_enable = gen6_irq_enable;
2653                engine->irq_disable = gen6_irq_disable;
2654                engine->irq_seqno_barrier = gen6_seqno_barrier;
2655        } else if (INTEL_GEN(dev_priv) >= 5) {
2656                engine->irq_enable = gen5_irq_enable;
2657                engine->irq_disable = gen5_irq_disable;
2658                engine->irq_seqno_barrier = gen5_seqno_barrier;
2659        } else if (INTEL_GEN(dev_priv) >= 3) {
2660                engine->irq_enable = i9xx_irq_enable;
2661                engine->irq_disable = i9xx_irq_disable;
2662        } else {
2663                engine->irq_enable = i8xx_irq_enable;
2664                engine->irq_disable = i8xx_irq_disable;
2665        }
2666}
2667
2668static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2669                                      struct intel_engine_cs *engine)
2670{
2671        intel_ring_init_irq(dev_priv, engine);
2672        intel_ring_init_semaphores(dev_priv, engine);
2673
2674        engine->init_hw = init_ring_common;
2675        engine->reset_hw = reset_ring_common;
2676
2677        engine->emit_request = i9xx_emit_request;
2678        if (i915.semaphores)
2679                engine->emit_request = gen6_sema_emit_request;
2680        engine->submit_request = i9xx_submit_request;
2681
2682        if (INTEL_GEN(dev_priv) >= 8)
2683                engine->emit_bb_start = gen8_emit_bb_start;
2684        else if (INTEL_GEN(dev_priv) >= 6)
2685                engine->emit_bb_start = gen6_emit_bb_start;
2686        else if (INTEL_GEN(dev_priv) >= 4)
2687                engine->emit_bb_start = i965_emit_bb_start;
2688        else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2689                engine->emit_bb_start = i830_emit_bb_start;
2690        else
2691                engine->emit_bb_start = i915_emit_bb_start;
2692}
2693
2694int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2695{
2696        struct drm_i915_private *dev_priv = engine->i915;
2697        int ret;
2698
2699        intel_ring_default_vfuncs(dev_priv, engine);
2700
2701        if (HAS_L3_DPF(dev_priv))
2702                engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2703
2704        if (INTEL_GEN(dev_priv) >= 8) {
2705                engine->init_context = intel_rcs_ctx_init;
2706                engine->emit_request = gen8_render_emit_request;
2707                engine->emit_flush = gen8_render_ring_flush;
2708                if (i915.semaphores)
2709                        engine->semaphore.signal = gen8_rcs_signal;
2710        } else if (INTEL_GEN(dev_priv) >= 6) {
2711                engine->init_context = intel_rcs_ctx_init;
2712                engine->emit_flush = gen7_render_ring_flush;
2713                if (IS_GEN6(dev_priv))
2714                        engine->emit_flush = gen6_render_ring_flush;
2715        } else if (IS_GEN5(dev_priv)) {
2716                engine->emit_flush = gen4_render_ring_flush;
2717        } else {
2718                if (INTEL_GEN(dev_priv) < 4)
2719                        engine->emit_flush = gen2_render_ring_flush;
2720                else
2721                        engine->emit_flush = gen4_render_ring_flush;
2722                engine->irq_enable_mask = I915_USER_INTERRUPT;
2723        }
2724
2725        if (IS_HASWELL(dev_priv))
2726                engine->emit_bb_start = hsw_emit_bb_start;
2727
2728        engine->init_hw = init_render_ring;
2729        engine->cleanup = render_ring_cleanup;
2730
2731        ret = intel_init_ring_buffer(engine);
2732        if (ret)
2733                return ret;
2734
2735        if (INTEL_GEN(dev_priv) >= 6) {
2736                ret = intel_engine_create_scratch(engine, 4096);
2737                if (ret)
2738                        return ret;
2739        } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2740                ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2741                if (ret)
2742                        return ret;
2743        }
2744
2745        return 0;
2746}
2747
2748int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2749{
2750        struct drm_i915_private *dev_priv = engine->i915;
2751
2752        intel_ring_default_vfuncs(dev_priv, engine);
2753
2754        if (INTEL_GEN(dev_priv) >= 6) {
2755                /* gen6 bsd needs a special workaround for tail updates */
2756                if (IS_GEN6(dev_priv))
2757                        engine->submit_request = gen6_bsd_submit_request;
2758                engine->emit_flush = gen6_bsd_ring_flush;
2759                if (INTEL_GEN(dev_priv) < 8)
2760                        engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2761        } else {
2762                engine->mmio_base = BSD_RING_BASE;
2763                engine->emit_flush = bsd_ring_flush;
2764                if (IS_GEN5(dev_priv))
2765                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2766                else
2767                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2768        }
2769
2770        return intel_init_ring_buffer(engine);
2771}
2772
2773/**
2774 * intel_init_bsd2_ring_buffer - Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
2775 */
2776int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
2777{
2778        struct drm_i915_private *dev_priv = engine->i915;
2779
2780        intel_ring_default_vfuncs(dev_priv, engine);
2781
2782        engine->emit_flush = gen6_bsd_ring_flush;
2783
2784        return intel_init_ring_buffer(engine);
2785}
2786
2787int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2788{
2789        struct drm_i915_private *dev_priv = engine->i915;
2790
2791        intel_ring_default_vfuncs(dev_priv, engine);
2792
2793        engine->emit_flush = gen6_ring_flush;
2794        if (INTEL_GEN(dev_priv) < 8)
2795                engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2796
2797        return intel_init_ring_buffer(engine);
2798}
2799
2800int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2801{
2802        struct drm_i915_private *dev_priv = engine->i915;
2803
2804        intel_ring_default_vfuncs(dev_priv, engine);
2805
2806        engine->emit_flush = gen6_ring_flush;
2807
2808        if (INTEL_GEN(dev_priv) < 8) {
2809                engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2810                engine->irq_enable = hsw_vebox_irq_enable;
2811                engine->irq_disable = hsw_vebox_irq_disable;
2812        }
2813
2814        return intel_init_ring_buffer(engine);
2815}
2816