linux/drivers/gpu/drm/i915/gvt/render.c
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

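/*
 * Descriptor of an engine MMIO register that must be saved/restored when
 * the engine is switched between host and vGPU.  @mask holds the bits of a
 * masked register (written to the upper 16 bits on update), @in_context
 * marks registers that live in the logical ring context image and therefore
 * only need an explicit MMIO write for an inhibit context, and @value
 * caches the host value while a vGPU owns the engine.
 */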
struct render_mmio {
        int ring_id;
        i915_reg_t reg;
        u32 mask;
        bool in_context;
        u32 value;
};

static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
        {RCS, _MMIO(0x20c0), 0xffff, true},
        {RCS, _MMIO(0x24d0), 0, false},
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
        {RCS, _MMIO(0x24e0), 0, false},
        {RCS, _MMIO(0x24e4), 0, false},
        {RCS, _MMIO(0x24e8), 0, false},
        {RCS, _MMIO(0x24ec), 0, false},
        {RCS, _MMIO(0x24f0), 0, false},
        {RCS, _MMIO(0x24f4), 0, false},
        {RCS, _MMIO(0x24f8), 0, false},
        {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
        {RCS, _MMIO(0x7010), 0xffff, true},
        {RCS, _MMIO(0x7300), 0xffff, true},
        {RCS, _MMIO(0x83a4), 0xffff, true},

        {BCS, _MMIO(0x2229c), 0xffff, false},
        {BCS, _MMIO(0x2209c), 0xffff, false},
        {BCS, _MMIO(0x220c0), 0xffff, false},
        {BCS, _MMIO(0x22098), 0x0, false},
        {BCS, _MMIO(0x22028), 0x0, false},
};

static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
        {RCS, _MMIO(0x229c), 0xffff, false},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x2098), 0x0, false},
        {RCS, _MMIO(0x20c0), 0xffff, true},
        {RCS, _MMIO(0x24d0), 0, false},
        {RCS, _MMIO(0x24d4), 0, false},
        {RCS, _MMIO(0x24d8), 0, false},
        {RCS, _MMIO(0x24dc), 0, false},
        {RCS, _MMIO(0x24e0), 0, false},
        {RCS, _MMIO(0x24e4), 0, false},
        {RCS, _MMIO(0x24e8), 0, false},
        {RCS, _MMIO(0x24ec), 0, false},
        {RCS, _MMIO(0x24f0), 0, false},
        {RCS, _MMIO(0x24f4), 0, false},
        {RCS, _MMIO(0x24f8), 0, false},
        {RCS, _MMIO(0x24fc), 0, false},
        {RCS, _MMIO(0x7004), 0xffff, true},
        {RCS, _MMIO(0x7008), 0xffff, true},
        {RCS, _MMIO(0x7000), 0xffff, true},
        {RCS, _MMIO(0x7010), 0xffff, true},
        {RCS, _MMIO(0x7300), 0xffff, true},
        {RCS, _MMIO(0x83a4), 0xffff, true},

        {RCS, _MMIO(0x40e0), 0, false},
        {RCS, _MMIO(0x40e4), 0, false},
        {RCS, _MMIO(0x2580), 0xffff, true},
        {RCS, _MMIO(0x7014), 0xffff, true},
        {RCS, _MMIO(0x20ec), 0xffff, false},
        {RCS, _MMIO(0xb118), 0, false},
        {RCS, _MMIO(0xe100), 0xffff, true},
        {RCS, _MMIO(0xe180), 0xffff, true},
        {RCS, _MMIO(0xe184), 0xffff, true},
        {RCS, _MMIO(0xe188), 0xffff, true},
        {RCS, _MMIO(0xe194), 0xffff, true},
        {RCS, _MMIO(0x4de0), 0, false},
        {RCS, _MMIO(0x4de4), 0, false},
        {RCS, _MMIO(0x4de8), 0, false},
        {RCS, _MMIO(0x4dec), 0, false},
        {RCS, _MMIO(0x4df0), 0, false},
        {RCS, _MMIO(0x4df4), 0, false},

        {BCS, _MMIO(0x2229c), 0xffff, false},
        {BCS, _MMIO(0x2209c), 0xffff, false},
        {BCS, _MMIO(0x220c0), 0xffff, false},
        {BCS, _MMIO(0x22098), 0x0, false},
        {BCS, _MMIO(0x22028), 0x0, false},

        {VCS2, _MMIO(0x1c028), 0xffff, false},

        {VECS, _MMIO(0x1a028), 0xffff, false},

        {RCS, _MMIO(0x7304), 0xffff, true},
        {RCS, _MMIO(0x2248), 0x0, false},
        {RCS, _MMIO(0x940c), 0x0, false},
        {RCS, _MMIO(0x4ab8), 0x0, false},

        {RCS, _MMIO(0x4ab0), 0x0, false},
        {RCS, _MMIO(0x20d4), 0x0, false},

        {RCS, _MMIO(0xb004), 0x0, false},
        {RCS, _MMIO(0x20a0), 0x0, false},
        {RCS, _MMIO(0x20e4), 0xffff, false},
};

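/*
 * Host MOCS register values, saved in load_mocs() while a vGPU's values
 * are programmed into the hardware and written back in restore_mocs().
 * gen9_render_mocs_L3 covers the RCS L3 MOCS control registers at 0xb020.
 */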
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
static u32 gen9_render_mocs_L3[32];

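/*
 * Invalidate the TLB of one engine on behalf of a vGPU.  The invalidate
 * register is only written if the vGPU has a pending TLB flush request
 * for this ring, and it is polled until the hardware clears it again.
 */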
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        enum forcewake_domains fw;
        i915_reg_t reg;
        u32 regs[] = {
                [RCS] = 0x4260,
                [VCS] = 0x4264,
                [VCS2] = 0x4268,
                [BCS] = 0x426c,
                [VECS] = 0x4270,
        };

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
                return;

        reg = _MMIO(regs[ring_id]);

        /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
         * We need to take a forcewake reference when invalidating the RCS
         * TLB caches, otherwise the device can enter RC6 and interrupt the
         * invalidation process.
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
        if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                fw |= FORCEWAKE_RENDER;

        intel_uncore_forcewake_get(dev_priv, fw);

        I915_WRITE_FW(reg, 0x1);

        if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
                gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
        else
                vgpu_vreg(vgpu, regs[ring_id]) = 0;

        intel_uncore_forcewake_put(dev_priv, fw);

        gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

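/*
 * Save the host MOCS settings of the given ring and load the vGPU's
 * virtual MOCS values (plus the L3 control table for RCS) into the
 * hardware.
 */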
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        i915_reg_t offset, l3_offset;
        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int i;

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
                I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
                offset.reg += 4;
        }

        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
                        I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
                        l3_offset.reg += 4;
                }
        }
}

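/*
 * Write the current hardware MOCS values back into the vGPU's virtual
 * registers and restore the host values saved by load_mocs().
 */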
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        i915_reg_t offset, l3_offset;
        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int i;

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
                I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
                offset.reg += 4;
        }

        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
                        I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
                        l3_offset.reg += 4;
                }
        }
}

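/* Dword index of the CONTEXT_CONTROL register value in lrc_reg_state[] */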
#define CTX_CONTEXT_CONTROL_VAL 0x03

/* Switch ring mmio values (context) from host to a vgpu. */
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
        u32 v;
        int i, array_size;
        u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
        u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
        i915_reg_t last_reg = _MMIO(0);

        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
                || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
                mmio = gen9_render_mmio_list;
                array_size = ARRAY_SIZE(gen9_render_mmio_list);
                load_mocs(vgpu, ring_id);
        } else {
                mmio = gen8_render_mmio_list;
                array_size = ARRAY_SIZE(gen8_render_mmio_list);
        }

        for (i = 0; i < array_size; i++, mmio++) {
                if (mmio->ring_id != ring_id)
                        continue;

                mmio->value = I915_READ_FW(mmio->reg);

                /*
                 * If it is an inhibit context, load the in_context mmio
                 * into HW by an mmio write. If it is not, skip this mmio
                 * write.
                 */
                if (mmio->in_context &&
                                ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
                                i915.enable_execlists)
                        continue;

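                /*
                 * Masked register: set the write-enable bits in the upper
                 * 16 bits so that the new value in the lower 16 bits takes
                 * effect.
                 */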
                if (mmio->mask)
                        v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
                else
                        v = vgpu_vreg(vgpu, mmio->reg);

                I915_WRITE_FW(mmio->reg, v);
                last_reg = mmio->reg;

                trace_render_mmio(vgpu->id, "load",
                                  i915_mmio_reg_offset(mmio->reg),
                                  mmio->value, v);
        }

        /* Make sure the switched MMIOs have taken effect. */
        if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
                I915_READ_FW(last_reg);

        handle_tlb_pending_event(vgpu, ring_id);
}

/* Switch ring mmio values (context) from vgpu to host. */
static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
        i915_reg_t last_reg = _MMIO(0);
        u32 v;
        int i, array_size;

        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                mmio = gen9_render_mmio_list;
                array_size = ARRAY_SIZE(gen9_render_mmio_list);
                restore_mocs(vgpu, ring_id);
        } else {
                mmio = gen8_render_mmio_list;
                array_size = ARRAY_SIZE(gen8_render_mmio_list);
        }

        for (i = 0; i < array_size; i++, mmio++) {
                if (mmio->ring_id != ring_id)
                        continue;

                vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);

                if (mmio->mask) {
                        vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
                        v = mmio->value | (mmio->mask << 16);
                } else
                        v = mmio->value;

                if (mmio->in_context)
                        continue;

                I915_WRITE_FW(mmio->reg, v);
                last_reg = mmio->reg;

                trace_render_mmio(vgpu->id, "restore",
                                  i915_mmio_reg_offset(mmio->reg),
                                  mmio->value, v);
        }

        /* Make sure the switched MMIOs have taken effect. */
        if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
                I915_READ_FW(last_reg);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is null, the engine is currently owned by the host. If next is
 * null, we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
                           struct intel_vgpu *next, int ring_id)
{
        struct drm_i915_private *dev_priv;

        if (WARN_ON(!pre && !next))
                return;

        gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
                       pre ? "vGPU" : "host", next ? "vGPU" : "host");

        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

        /*
         * We are using a raw mmio access wrapper to improve performance
         * for batch mmio read/write, so we need to handle forcewake
         * manually.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        /*
         * TODO: Optimize for vGPU to vGPU switch by merging
         * switch_mmio_to_host() and switch_mmio_to_vgpu().
         */
        if (pre)
                switch_mmio_to_host(pre, ring_id);

        if (next)
                switch_mmio_to_vgpu(next, ring_id);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}