/* linux/drivers/gpu/drm/i915/i915_debugfs.c */
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Keith Packard <keithp@keithp.com>
  26 *
  27 */
  28
  29#include <linux/sched/mm.h>
  30#include <linux/sort.h>
  31
  32#include <drm/drm_debugfs.h>
  33
  34#include "gem/i915_gem_context.h"
  35#include "gt/intel_gt_buffer_pool.h"
  36#include "gt/intel_gt_clock_utils.h"
  37#include "gt/intel_gt_pm.h"
  38#include "gt/intel_gt_requests.h"
  39#include "gt/intel_reset.h"
  40#include "gt/intel_rc6.h"
  41#include "gt/intel_rps.h"
  42
  43#include "i915_debugfs.h"
  44#include "i915_debugfs_params.h"
  45#include "i915_irq.h"
  46#include "i915_trace.h"
  47#include "intel_pm.h"
  48#include "intel_sideband.h"
  49
  50static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  51{
  52        return to_i915(node->minor->dev);
  53}
  54
/*
 * debugfs "i915_capabilities": dump the PCH type, static and runtime
 * device info, driver caps and the current module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	/* Hold the module-param lock so the dump is a consistent snapshot. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
  72
  73static char get_tiling_flag(struct drm_i915_gem_object *obj)
  74{
  75        switch (i915_gem_object_get_tiling(obj)) {
  76        default:
  77        case I915_TILING_NONE: return ' ';
  78        case I915_TILING_X: return 'X';
  79        case I915_TILING_Y: return 'Y';
  80        }
  81}
  82
  83static char get_global_flag(struct drm_i915_gem_object *obj)
  84{
  85        return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
  86}
  87
  88static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
  89{
  90        return obj->mm.mapping ? 'M' : ' ';
  91}
  92
  93static const char *
  94stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
  95{
  96        size_t x = 0;
  97
  98        switch (page_sizes) {
  99        case 0:
 100                return "";
 101        case I915_GTT_PAGE_SIZE_4K:
 102                return "4K";
 103        case I915_GTT_PAGE_SIZE_64K:
 104                return "64K";
 105        case I915_GTT_PAGE_SIZE_2M:
 106                return "2M";
 107        default:
 108                if (!buf)
 109                        return "M";
 110
 111                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
 112                        x += snprintf(buf + x, len - x, "2M, ");
 113                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
 114                        x += snprintf(buf + x, len - x, "64K, ");
 115                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
 116                        x += snprintf(buf + x, len - x, "4K, ");
 117                buf[x-2] = '\0';
 118
 119                return buf;
 120        }
 121}
 122
/*
 * i915_debugfs_describe_obj - print a one-line summary of a GEM object
 * @m: seq_file to emit into
 * @obj: object to describe
 *
 * Emits the object's flags, size, read/write domains, cache level, every
 * bound vma (with its GGTT view details, fence and page sizes), pin count,
 * stolen/framebuffer status and last-write engine.  No trailing newline;
 * callers add their own.
 */
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		/* Only describe vmas that are actually bound in a GTT. */
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		/*
		 * The vma list lock is dropped across the printing below
		 * (seq output may allocate), then retaken at the bottom of
		 * the loop before the iterator advances.
		 */
		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe how the GGTT view remaps the object. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		/* Retake the lock before the iterator dereferences the list. */
		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
 218
/*
 * Accumulator for summarising GEM object usage, either over GGTT vmas
 * (vm == NULL) or filtered to one address space (vm != NULL).
 */
struct file_stats {
	struct i915_address_space *vm; /* filter; NULL means "count GGTT vmas" */
	unsigned long count;           /* number of objects seen */
	u64 total;                     /* sum of object sizes, bytes */
	u64 active, inactive;          /* bound-vma bytes split by GPU activity */
	u64 closed;                    /* bound-vma bytes of already-closed vmas */
};
 226
/*
 * per_file_stats - accumulate one object's usage into a file_stats
 * @id: idr slot (unused; present for the idr_for_each() signature)
 * @ptr: the drm_i915_gem_object occupying the slot
 * @data: struct file_stats accumulator
 *
 * Takes its own reference on the object so callers may iterate their
 * tables under RCU.  Always returns 0 so iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	/* Skip empty/error slots and objects already on their way out. */
	if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		/* No vm filter: account every bound GGTT vma. */
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		/*
		 * Filtered: binary-search the object's vma rb-tree for the
		 * vma belonging to stats->vm (NULL view).
		 */
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				/* Only count it if it is actually bound. */
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			/* Descent direction follows i915_vma_compare() order. */
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}
 284
/*
 * Emit one "name: N objects, ... bytes" summary line, but only when
 * anything was counted.  NOTE: 'stats' is expanded several times by this
 * macro, so pass only a plain local, never an expression with side
 * effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.closed); \
} while (0)
 295
/*
 * print_context_stats - summarise GEM usage per context/client
 * @m: seq_file to emit into
 * @i915: device
 *
 * Walks the device context list, folding kernel-internal objects
 * (context state and ring buffers) into one "[k]contexts" line, and for
 * each context with an open file, summarising that client's handle table.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts already being destroyed. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		/*
		 * Drop the list lock while accumulating; our reference
		 * keeps ctx alive, and we resynchronise the iterator via
		 * list_safe_reset_next() before continuing the walk.
		 */
		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			/* Only account contexts currently in use by the GPU. */
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			/* Label the line with the owning process name. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		/* List may have changed while unlocked; recompute cn. */
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
 355
/*
 * debugfs "i915_gem_objects": global shrinker counters, per-region
 * totals, then per-client statistics.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}
 375
/*
 * Dump gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER,
 * port, misc and PCU blocks.  Shared by the gen8 and gen11 branches of
 * i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/*
		 * Reading pipe registers with the power well down would be
		 * invalid; only a conditional grab, so powered-off pipes
		 * are reported as such rather than powered up.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
 427
/*
 * debugfs "i915_interrupt_info": dump the interrupt mask/identity/enable
 * registers appropriate to this platform generation, holding a runtime-pm
 * wakeref across all register reads.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		/* Cherryview: gen8-style GT/master plus VLV-style display. */
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Don't touch pipe registers with the power well off. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		/* The four gen8 GT interrupt register banks. */
		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* Gen11+: per-class enables plus gen8-style display info. */
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		/* Gen8-10: master control, GT banks, display block. */
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* As above: skip pipes whose power well is down. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-Ironlake: single IER/IIR/IMR set plus pipe stats. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* Ironlake..gen7: north/south display plus GT registers. */
		seq_printf(m, "North Display Interrupt enable:          %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:        %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:            %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:          %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:        %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:            %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:               %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:             %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:         %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		/* Gen11+ engine-class interrupt masks. */
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6-10: per-engine ring interrupt masks. */
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):       %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
 648
/*
 * debugfs "i915_gem_fence_regs": list every fence register, its pin
 * count and the object (if any) currently occupying it.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	/*
	 * reg->vma is sampled under rcu_read_lock(); presumably that keeps
	 * the vma safe to dereference here -- confirm against the fence
	 * teardown path.
	 */
	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
 673
 674#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * Read a captured GPU coredump out to userspace.  Returns 0 (EOF) when
 * no capture is attached to this file handle.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	/*
	 * NOTE(review): 'count' comes straight from userspace read(); a
	 * huge read attempts an equally huge kmalloc().  Consider clamping
	 * to a sane chunk size -- TODO confirm whether this matters here.
	 */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* Only advance *pos by bytes that actually reached userspace. */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
 704
 705static int gpu_state_release(struct inode *inode, struct file *file)
 706{
 707        i915_gpu_coredump_put(file->private_data);
 708        return 0;
 709}
 710
 711static int i915_gpu_info_open(struct inode *inode, struct file *file)
 712{
 713        struct drm_i915_private *i915 = inode->i_private;
 714        struct i915_gpu_coredump *gpu;
 715        intel_wakeref_t wakeref;
 716
 717        gpu = NULL;
 718        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 719                gpu = i915_gpu_coredump(i915);
 720        if (IS_ERR(gpu))
 721                return PTR_ERR(gpu);
 722
 723        file->private_data = gpu;
 724        return 0;
 725}
 726
/*
 * Fops for the gpu-info debugfs entry: open captures a fresh GPU
 * coredump, read streams it out, release drops the reference.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
 734
 735static ssize_t
 736i915_error_state_write(struct file *filp,
 737                       const char __user *ubuf,
 738                       size_t cnt,
 739                       loff_t *ppos)
 740{
 741        struct i915_gpu_coredump *error = filp->private_data;
 742
 743        if (!error)
 744                return 0;
 745
 746        drm_dbg(&error->i915->drm, "Resetting error state\n");
 747        i915_reset_error_state(error->i915);
 748
 749        return cnt;
 750}
 751
 752static int i915_error_state_open(struct inode *inode, struct file *file)
 753{
 754        struct i915_gpu_coredump *error;
 755
 756        error = i915_first_error_state(inode->i_private);
 757        if (IS_ERR(error))
 758                return PTR_ERR(error);
 759
 760        file->private_data  = error;
 761        return 0;
 762}
 763
/*
 * Fops for the error-state debugfs entry: open grabs a reference on the
 * first recorded error state, read streams it, any write clears it, and
 * release drops the reference.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
 772#endif
 773
 774static int i915_frequency_info(struct seq_file *m, void *unused)
 775{
 776        struct drm_i915_private *dev_priv = node_to_i915(m->private);
 777        struct intel_uncore *uncore = &dev_priv->uncore;
 778        struct intel_rps *rps = &dev_priv->gt.rps;
 779        intel_wakeref_t wakeref;
 780        int ret = 0;
 781
 782        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 783
 784        if (IS_GEN(dev_priv, 5)) {
 785                u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 786                u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
 787
 788                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
 789                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
 790                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
 791                           MEMSTAT_VID_SHIFT);
 792                seq_printf(m, "Current P-state: %d\n",
 793                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 794        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 795                u32 rpmodectl, freq_sts;
 796
 797                rpmodectl = I915_READ(GEN6_RP_CONTROL);
 798                seq_printf(m, "Video Turbo Mode: %s\n",
 799                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 800                seq_printf(m, "HW control enabled: %s\n",
 801                           yesno(rpmodectl & GEN6_RP_ENABLE));
 802                seq_printf(m, "SW control enabled: %s\n",
 803                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 804                                  GEN6_RP_MEDIA_SW_MODE));
 805
 806                vlv_punit_get(dev_priv);
 807                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 808                vlv_punit_put(dev_priv);
 809
 810                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 811                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 812
 813                seq_printf(m, "actual GPU freq: %d MHz\n",
 814                           intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
 815
 816                seq_printf(m, "current GPU freq: %d MHz\n",
 817                           intel_gpu_freq(rps, rps->cur_freq));
 818
 819                seq_printf(m, "max GPU freq: %d MHz\n",
 820                           intel_gpu_freq(rps, rps->max_freq));
 821
 822                seq_printf(m, "min GPU freq: %d MHz\n",
 823                           intel_gpu_freq(rps, rps->min_freq));
 824
 825                seq_printf(m, "idle GPU freq: %d MHz\n",
 826                           intel_gpu_freq(rps, rps->idle_freq));
 827
 828                seq_printf(m,
 829                           "efficient (RPe) frequency: %d MHz\n",
 830                           intel_gpu_freq(rps, rps->efficient_freq));
 831        } else if (INTEL_GEN(dev_priv) >= 6) {
 832                u32 rp_state_limits;
 833                u32 gt_perf_status;
 834                u32 rp_state_cap;
 835                u32 rpmodectl, rpinclimit, rpdeclimit;
 836                u32 rpstat, cagf, reqf;
 837                u32 rpupei, rpcurup, rpprevup;
 838                u32 rpdownei, rpcurdown, rpprevdown;
 839                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 840                int max_freq;
 841
 842                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 843                if (IS_GEN9_LP(dev_priv)) {
 844                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 845                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 846                } else {
 847                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 848                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 849                }
 850
 851                /* RPSTAT1 is in the GT power well */
 852                intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 853
 854                reqf = I915_READ(GEN6_RPNSWREQ);
 855                if (INTEL_GEN(dev_priv) >= 9)
 856                        reqf >>= 23;
 857                else {
 858                        reqf &= ~GEN6_TURBO_DISABLE;
 859                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 860                                reqf >>= 24;
 861                        else
 862                                reqf >>= 25;
 863                }
 864                reqf = intel_gpu_freq(rps, reqf);
 865
 866                rpmodectl = I915_READ(GEN6_RP_CONTROL);
 867                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
 868                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
 869
 870                rpstat = I915_READ(GEN6_RPSTAT1);
 871                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
 872                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
 873                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
 874                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 875                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 876                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
 877                cagf = intel_rps_read_actual_frequency(rps);
 878
 879                intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 880
 881                if (INTEL_GEN(dev_priv) >= 11) {
 882                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
 883                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
 884                        /*
 885                         * The equivalent to the PM ISR & IIR cannot be read
 886                         * without affecting the current state of the system
 887                         */
 888                        pm_isr = 0;
 889                        pm_iir = 0;
 890                } else if (INTEL_GEN(dev_priv) >= 8) {
 891                        pm_ier = I915_READ(GEN8_GT_IER(2));
 892                        pm_imr = I915_READ(GEN8_GT_IMR(2));
 893                        pm_isr = I915_READ(GEN8_GT_ISR(2));
 894                        pm_iir = I915_READ(GEN8_GT_IIR(2));
 895                } else {
 896                        pm_ier = I915_READ(GEN6_PMIER);
 897                        pm_imr = I915_READ(GEN6_PMIMR);
 898                        pm_isr = I915_READ(GEN6_PMISR);
 899                        pm_iir = I915_READ(GEN6_PMIIR);
 900                }
 901                pm_mask = I915_READ(GEN6_PMINTRMSK);
 902
 903                seq_printf(m, "Video Turbo Mode: %s\n",
 904                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 905                seq_printf(m, "HW control enabled: %s\n",
 906                           yesno(rpmodectl & GEN6_RP_ENABLE));
 907                seq_printf(m, "SW control enabled: %s\n",
 908                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 909                                  GEN6_RP_MEDIA_SW_MODE));
 910
 911                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
 912                           pm_ier, pm_imr, pm_mask);
 913                if (INTEL_GEN(dev_priv) <= 10)
 914                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
 915                                   pm_isr, pm_iir);
 916                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 917                           rps->pm_intrmsk_mbz);
 918                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 919                seq_printf(m, "Render p-state ratio: %d\n",
 920                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
 921                seq_printf(m, "Render p-state VID: %d\n",
 922                           gt_perf_status & 0xff);
 923                seq_printf(m, "Render p-state limit: %d\n",
 924                           rp_state_limits & 0xff);
 925                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 926                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
 927                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
 928                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 929                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 930                seq_printf(m, "CAGF: %dMHz\n", cagf);
 931                seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
 932                           rpupei,
 933                           intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
 934                seq_printf(m, "RP CUR UP: %d (%dun)\n",
 935                           rpcurup,
 936                           intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
 937                seq_printf(m, "RP PREV UP: %d (%dns)\n",
 938                           rpprevup,
 939                           intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
 940                seq_printf(m, "Up threshold: %d%%\n",
 941                           rps->power.up_threshold);
 942
 943                seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
 944                           rpdownei,
 945                           intel_gt_pm_interval_to_ns(&dev_priv->gt,
 946                                                      rpdownei));
 947                seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
 948                           rpcurdown,
 949                           intel_gt_pm_interval_to_ns(&dev_priv->gt,
 950                                                      rpcurdown));
 951                seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
 952                           rpprevdown,
 953                           intel_gt_pm_interval_to_ns(&dev_priv->gt,
 954                                                      rpprevdown));
 955                seq_printf(m, "Down threshold: %d%%\n",
 956                           rps->power.down_threshold);
 957
 958                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 959                            rp_state_cap >> 16) & 0xff;
 960                max_freq *= (IS_GEN9_BC(dev_priv) ||
 961                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 962                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 963                           intel_gpu_freq(rps, max_freq));
 964
 965                max_freq = (rp_state_cap & 0xff00) >> 8;
 966                max_freq *= (IS_GEN9_BC(dev_priv) ||
 967                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 968                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 969                           intel_gpu_freq(rps, max_freq));
 970
 971                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 972                            rp_state_cap >> 0) & 0xff;
 973                max_freq *= (IS_GEN9_BC(dev_priv) ||
 974                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 975                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 976                           intel_gpu_freq(rps, max_freq));
 977                seq_printf(m, "Max overclocked frequency: %dMHz\n",
 978                           intel_gpu_freq(rps, rps->max_freq));
 979
 980                seq_printf(m, "Current freq: %d MHz\n",
 981                           intel_gpu_freq(rps, rps->cur_freq));
 982                seq_printf(m, "Actual freq: %d MHz\n", cagf);
 983                seq_printf(m, "Idle freq: %d MHz\n",
 984                           intel_gpu_freq(rps, rps->idle_freq));
 985                seq_printf(m, "Min freq: %d MHz\n",
 986                           intel_gpu_freq(rps, rps->min_freq));
 987                seq_printf(m, "Boost freq: %d MHz\n",
 988                           intel_gpu_freq(rps, rps->boost_freq));
 989                seq_printf(m, "Max freq: %d MHz\n",
 990                           intel_gpu_freq(rps, rps->max_freq));
 991                seq_printf(m,
 992                           "efficient (RPe) frequency: %d MHz\n",
 993                           intel_gpu_freq(rps, rps->efficient_freq));
 994        } else {
 995                seq_puts(m, "no P-state info available\n");
 996        }
 997
 998        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
 999        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1000        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1001
1002        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1003        return ret;
1004}
1005
1006static int i915_ring_freq_table(struct seq_file *m, void *unused)
1007{
1008        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1009        struct intel_rps *rps = &dev_priv->gt.rps;
1010        unsigned int max_gpu_freq, min_gpu_freq;
1011        intel_wakeref_t wakeref;
1012        int gpu_freq, ia_freq;
1013
1014        if (!HAS_LLC(dev_priv))
1015                return -ENODEV;
1016
1017        min_gpu_freq = rps->min_freq;
1018        max_gpu_freq = rps->max_freq;
1019        if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1020                /* Convert GT frequency to 50 HZ units */
1021                min_gpu_freq /= GEN9_FREQ_SCALER;
1022                max_gpu_freq /= GEN9_FREQ_SCALER;
1023        }
1024
1025        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1026
1027        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1028        for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1029                ia_freq = gpu_freq;
1030                sandybridge_pcode_read(dev_priv,
1031                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1032                                       &ia_freq, NULL);
1033                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1034                           intel_gpu_freq(rps,
1035                                          (gpu_freq *
1036                                           (IS_GEN9_BC(dev_priv) ||
1037                                            INTEL_GEN(dev_priv) >= 10 ?
1038                                            GEN9_FREQ_SCALER : 1))),
1039                           ((ia_freq >> 0) & 0xff) * 100,
1040                           ((ia_freq >> 8) & 0xff) * 100);
1041        }
1042        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1043
1044        return 0;
1045}
1046
/* Append a one-line summary of a context ringbuffer's bookkeeping. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1052
/*
 * Dump every GEM context and its per-engine state.  The contexts lock
 * is dropped while printing (printing may sleep), so the iteration
 * pins each context with a kref and revalidates its next pointer after
 * reacquiring the lock.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts that are already on their way to be freed. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		/* Safe to drop the lock: we hold a reference on ctx. */
		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when ctx->remap_slice is set, 'r' otherwise. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			/* Only dump engine state for contexts still in use. */
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		/*
		 * Retake the lock and recompute the next element, which
		 * may have been freed while the lock was dropped.
		 */
		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}
1110
1111static const char *swizzle_string(unsigned swizzle)
1112{
1113        switch (swizzle) {
1114        case I915_BIT_6_SWIZZLE_NONE:
1115                return "none";
1116        case I915_BIT_6_SWIZZLE_9:
1117                return "bit9";
1118        case I915_BIT_6_SWIZZLE_9_10:
1119                return "bit9/bit10";
1120        case I915_BIT_6_SWIZZLE_9_11:
1121                return "bit9/bit11";
1122        case I915_BIT_6_SWIZZLE_9_10_11:
1123                return "bit9/bit10/bit11";
1124        case I915_BIT_6_SWIZZLE_9_17:
1125                return "bit9/bit17";
1126        case I915_BIT_6_SWIZZLE_9_10_17:
1127                return "bit9/bit10/bit17";
1128        case I915_BIT_6_SWIZZLE_UNKNOWN:
1129                return "unknown";
1130        }
1131
1132        return "bug";
1133}
1134
1135static int i915_swizzle_info(struct seq_file *m, void *data)
1136{
1137        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1138        struct intel_uncore *uncore = &dev_priv->uncore;
1139        intel_wakeref_t wakeref;
1140
1141        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1142
1143        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1144                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
1145        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1146                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
1147
1148        if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1149                seq_printf(m, "DDC = 0x%08x\n",
1150                           intel_uncore_read(uncore, DCC));
1151                seq_printf(m, "DDC2 = 0x%08x\n",
1152                           intel_uncore_read(uncore, DCC2));
1153                seq_printf(m, "C0DRB3 = 0x%04x\n",
1154                           intel_uncore_read16(uncore, C0DRB3));
1155                seq_printf(m, "C1DRB3 = 0x%04x\n",
1156                           intel_uncore_read16(uncore, C1DRB3));
1157        } else if (INTEL_GEN(dev_priv) >= 6) {
1158                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1159                           intel_uncore_read(uncore, MAD_DIMM_C0));
1160                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1161                           intel_uncore_read(uncore, MAD_DIMM_C1));
1162                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1163                           intel_uncore_read(uncore, MAD_DIMM_C2));
1164                seq_printf(m, "TILECTL = 0x%08x\n",
1165                           intel_uncore_read(uncore, TILECTL));
1166                if (INTEL_GEN(dev_priv) >= 8)
1167                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1168                                   intel_uncore_read(uncore, GAMTARBMODE));
1169                else
1170                        seq_printf(m, "ARB_MODE = 0x%08x\n",
1171                                   intel_uncore_read(uncore, ARB_MODE));
1172                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1173                           intel_uncore_read(uncore, DISP_ARB_CTL));
1174        }
1175
1176        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1177                seq_puts(m, "L-shaped memory detected\n");
1178
1179        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1180
1181        return 0;
1182}
1183
1184static const char *rps_power_to_str(unsigned int power)
1185{
1186        static const char * const strings[] = {
1187                [LOW_POWER] = "low power",
1188                [BETWEEN] = "mixed",
1189                [HIGH_POWER] = "high power",
1190        };
1191
1192        if (power >= ARRAY_SIZE(strings) || !strings[power])
1193                return "unknown";
1194
1195        return strings[power];
1196}
1197
/* Summarise RPS (GPU frequency scaling) state, limits and boost activity. */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw _FW reads: hold forcewake explicitly around them. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard the percentage against zero EI counter reads. */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
1249
1250static int i915_llc(struct seq_file *m, void *data)
1251{
1252        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1253        const bool edram = INTEL_GEN(dev_priv) > 8;
1254
1255        seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1256        seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1257                   dev_priv->edram_size_mb);
1258
1259        return 0;
1260}
1261
1262static int i915_runtime_pm_status(struct seq_file *m, void *unused)
1263{
1264        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1265        struct pci_dev *pdev = dev_priv->drm.pdev;
1266
1267        if (!HAS_RUNTIME_PM(dev_priv))
1268                seq_puts(m, "Runtime power management not supported\n");
1269
1270        seq_printf(m, "Runtime power status: %s\n",
1271                   enableddisabled(!dev_priv->power_domains.wakeref));
1272
1273        seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
1274        seq_printf(m, "IRQs disabled: %s\n",
1275                   yesno(!intel_irqs_enabled(dev_priv)));
1276#ifdef CONFIG_PM
1277        seq_printf(m, "Usage count: %d\n",
1278                   atomic_read(&dev_priv->drm.dev->power.usage_count));
1279#else
1280        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
1281#endif
1282        seq_printf(m, "PCI device power state: %s [%d]\n",
1283                   pci_power_name(pdev->current_state),
1284                   pdev->current_state);
1285
1286        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
1287                struct drm_printer p = drm_seq_file_printer(m);
1288
1289                print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
1290        }
1291
1292        return 0;
1293}
1294
1295static int i915_engine_info(struct seq_file *m, void *unused)
1296{
1297        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1298        struct intel_engine_cs *engine;
1299        intel_wakeref_t wakeref;
1300        struct drm_printer p;
1301
1302        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1303
1304        seq_printf(m, "GT awake? %s [%d]\n",
1305                   yesno(dev_priv->gt.awake),
1306                   atomic_read(&dev_priv->gt.wakeref.count));
1307        seq_printf(m, "CS timestamp frequency: %u Hz\n",
1308                   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz);
1309
1310        p = drm_seq_file_printer(m);
1311        for_each_uabi_engine(engine, dev_priv)
1312                intel_engine_dump(engine, &p, "%s\n", engine->name);
1313
1314        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1315
1316        return 0;
1317}
1318
/* Print the slice/subslice/EU topology via the device-info helper. */
static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}
1328
/* Expose the tunables of the registered memory shrinker. */
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
1338
1339static int i915_wa_registers(struct seq_file *m, void *unused)
1340{
1341        struct drm_i915_private *i915 = node_to_i915(m->private);
1342        struct intel_engine_cs *engine;
1343
1344        for_each_uabi_engine(engine, i915) {
1345                const struct i915_wa_list *wal = &engine->ctx_wa_list;
1346                const struct i915_wa *wa;
1347                unsigned int count;
1348
1349                count = wal->count;
1350                if (!count)
1351                        continue;
1352
1353                seq_printf(m, "%s: Workarounds applied: %u\n",
1354                           engine->name, count);
1355
1356                for (wa = wal->list; count--; wa++)
1357                        seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
1358                                   i915_mmio_reg_offset(wa->reg),
1359                                   wa->set, wa->clr);
1360
1361                seq_printf(m, "\n");
1362        }
1363
1364        return 0;
1365}
1366
1367static int
1368i915_wedged_get(void *data, u64 *val)
1369{
1370        struct drm_i915_private *i915 = data;
1371        int ret = intel_gt_terminally_wedged(&i915->gt);
1372
1373        switch (ret) {
1374        case -EIO:
1375                *val = 1;
1376                return 0;
1377        case 0:
1378                *val = 0;
1379                return 0;
1380        default:
1381                return ret;
1382        }
1383}
1384
/*
 * Manually declare engines hung and trigger error handling with capture.
 * @val is passed through as the engine mask to reset.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}
1398
/* i915_wedged: read reports wedged status, write forces an engine reset. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
1402
1403static int
1404i915_perf_noa_delay_set(void *data, u64 val)
1405{
1406        struct drm_i915_private *i915 = data;
1407
1408        /*
1409         * This would lead to infinite waits as we're doing timestamp
1410         * difference on the CS with only 32bits.
1411         */
1412        if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX)
1413                return -EINVAL;
1414
1415        atomic64_set(&i915->perf.noa_programming_delay, val);
1416        return 0;
1417}
1418
/* Read back the currently programmed NOA delay. */
static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

/* i915_perf_noa_delay: tune the OA unit's NOA programming delay. */
DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
1432
/* Flags accepted by i915_gem_drop_caches; see i915_drop_caches_set(). */
#define DROP_UNBOUND    BIT(0)	/* i915_gem_shrink(I915_SHRINK_UNBOUND) */
#define DROP_BOUND      BIT(1)	/* i915_gem_shrink(I915_SHRINK_BOUND) */
#define DROP_RETIRE     BIT(2)	/* retire completed requests */
#define DROP_ACTIVE     BIT(3)	/* wait for the GT to idle */
#define DROP_FREED      BIT(4)	/* flush buffer pool, drain freed objects */
#define DROP_SHRINK_ALL BIT(5)	/* i915_gem_shrink_all() */
#define DROP_IDLE       BIT(6)	/* additionally wait for GT pm to idle */
#define DROP_RESET_ACTIVE       BIT(7)	/* wedge if busy, then reset */
#define DROP_RESET_SEQNO        BIT(8)	/* accepted; no handler in the visible paths */
#define DROP_RCU        BIT(9)	/* rcu_barrier() */
#define DROP_ALL (DROP_UNBOUND  | \
		  DROP_BOUND    | \
		  DROP_RETIRE   | \
		  DROP_ACTIVE   | \
		  DROP_FREED    | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE     | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
1453static int
1454i915_drop_caches_get(void *data, u64 *val)
1455{
1456        *val = DROP_ALL;
1457
1458        return 0;
1459}
/*
 * Apply the GT-level subset of the DROP_* flags: optionally wedge/reset
 * the GT, retire requests and wait for idleness.  Returns 0 on success
 * or the first error from an idle wait.
 */
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	/*
	 * DROP_RESET_ACTIVE: if the engines fail to idle within the
	 * timeout, wedge the GT so stuck requests are discarded.
	 */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		/* Wait for all in-flight requests to complete. */
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		/* DROP_IDLE additionally waits for GT pm to fully idle. */
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	/* If we wedged above (or were already wedged), attempt a reset. */
	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}
1492
1493static int
1494i915_drop_caches_set(void *data, u64 val)
1495{
1496        struct drm_i915_private *i915 = data;
1497        int ret;
1498
1499        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
1500                  val, val & DROP_ALL);
1501
1502        ret = gt_drop_caches(&i915->gt, val);
1503        if (ret)
1504                return ret;
1505
1506        fs_reclaim_acquire(GFP_KERNEL);
1507        if (val & DROP_BOUND)
1508                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
1509
1510        if (val & DROP_UNBOUND)
1511                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
1512
1513        if (val & DROP_SHRINK_ALL)
1514                i915_gem_shrink_all(i915);
1515        fs_reclaim_release(GFP_KERNEL);
1516
1517        if (val & DROP_RCU)
1518                rcu_barrier();
1519
1520        if (val & DROP_FREED)
1521                i915_gem_drain_freed_objects(i915);
1522
1523        return 0;
1524}
1525
/* i915_gem_drop_caches: read advertises DROP_ALL; write applies the flags. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
1529
1530static int
1531i915_cache_sharing_get(void *data, u64 *val)
1532{
1533        struct drm_i915_private *dev_priv = data;
1534        intel_wakeref_t wakeref;
1535        u32 snpcr = 0;
1536
1537        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
1538                return -ENODEV;
1539
1540        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1541                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1542
1543        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
1544
1545        return 0;
1546}
1547
/*
 * Write the MBC snoop/cache sharing policy field on gen6/7.  Only values
 * 0-3 are accepted; needs a runtime-pm wakeref for the register RMW.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	/* The MBC SNPCR register only exists on gen6/gen7 hardware. */
	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
1574
1575static void
1576intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
1577                          u8 *to_mask)
1578{
1579        int offset = slice * sseu->ss_stride;
1580
1581        memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
1582}
1583
/* i915_cache_sharing: get/set the gen6/7 MBC cache sharing policy (0-3). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
1587
1588static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
1589                                          struct sseu_dev_info *sseu)
1590{
1591#define SS_MAX 2
1592        const int ss_max = SS_MAX;
1593        u32 sig1[SS_MAX], sig2[SS_MAX];
1594        int ss;
1595
1596        sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
1597        sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
1598        sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
1599        sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
1600
1601        for (ss = 0; ss < ss_max; ss++) {
1602                unsigned int eu_cnt;
1603
1604                if (sig1[ss] & CHV_SS_PG_ENABLE)
1605                        /* skip disabled subslice */
1606                        continue;
1607
1608                sseu->slice_mask = BIT(0);
1609                sseu->subslice_mask[0] |= BIT(ss);
1610                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
1611                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
1612                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
1613                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
1614                sseu->eu_total += eu_cnt;
1615                sseu->eu_per_subslice = max_t(unsigned int,
1616                                              sseu->eu_per_subslice, eu_cnt);
1617        }
1618#undef SS_MAX
1619}
1620
/*
 * Read the gen10 slice/subslice/EU power-gating ACK registers and
 * accumulate the currently powered-up topology into @sseu.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	/* Snapshot per-slice and per-subslice-pair ACK registers. */
	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* Each ACK register carries two subslices; mask selects one half. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents two EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
1677
/*
 * Read the gen9 slice/subslice/EU power-gating ACK registers and
 * accumulate the currently powered-up topology into @sseu.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	/* Snapshot per-slice and per-subslice-pair ACK registers. */
	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* Each ACK register carries two subslices; mask selects one half. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/*
		 * gen9 big-core copies the static subslice topology;
		 * gen9lp derives it from the per-subslice ACK bits below.
		 */
		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			/* Byte index of this subslice in the packed mask. */
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			/* Each ACK bit represents two EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
1736
/*
 * Broadwell only exposes a fused-slice readout (GEN8_GT_SLICE_INFO);
 * subslice and EU counts come from the static topology, then EUs fused
 * off in 7-EU subslices are subtracted.
 */
static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		/* Copy the static subslice topology for each enabled slice. */
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
1762
1763static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
1764                                 const struct sseu_dev_info *sseu)
1765{
1766        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1767        const char *type = is_available_info ? "Available" : "Enabled";
1768        int s;
1769
1770        seq_printf(m, "  %s Slice Mask: %04x\n", type,
1771                   sseu->slice_mask);
1772        seq_printf(m, "  %s Slice Total: %u\n", type,
1773                   hweight8(sseu->slice_mask));
1774        seq_printf(m, "  %s Subslice Total: %u\n", type,
1775                   intel_sseu_subslice_total(sseu));
1776        for (s = 0; s < fls(sseu->slice_mask); s++) {
1777                seq_printf(m, "  %s Slice%i subslices: %u\n", type,
1778                           s, intel_sseu_subslices_per_slice(sseu, s));
1779        }
1780        seq_printf(m, "  %s EU Total: %u\n", type,
1781                   sseu->eu_total);
1782        seq_printf(m, "  %s EU Per Subslice: %u\n", type,
1783                   sseu->eu_per_subslice);
1784
1785        if (!is_available_info)
1786                return;
1787
1788        seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
1789        if (HAS_POOLED_EU(dev_priv))
1790                seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
1791
1792        seq_printf(m, "  Has Slice Power Gating: %s\n",
1793                   yesno(sseu->has_slice_pg));
1794        seq_printf(m, "  Has Subslice Power Gating: %s\n",
1795                   yesno(sseu->has_subslice_pg));
1796        seq_printf(m, "  Has EU Power Gating: %s\n",
1797                   yesno(sseu->has_eu_pg));
1798}
1799
1800static int i915_sseu_status(struct seq_file *m, void *unused)
1801{
1802        struct drm_i915_private *dev_priv = node_to_i915(m->private);
1803        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
1804        struct sseu_dev_info sseu;
1805        intel_wakeref_t wakeref;
1806
1807        if (INTEL_GEN(dev_priv) < 8)
1808                return -ENODEV;
1809
1810        seq_puts(m, "SSEU Device Info\n");
1811        i915_print_sseu_info(m, true, &info->sseu);
1812
1813        seq_puts(m, "SSEU Device Status\n");
1814        memset(&sseu, 0, sizeof(sseu));
1815        intel_sseu_set_info(&sseu, info->sseu.max_slices,
1816                            info->sseu.max_subslices,
1817                            info->sseu.max_eus_per_subslice);
1818
1819        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1820                if (IS_CHERRYVIEW(dev_priv))
1821                        cherryview_sseu_device_status(dev_priv, &sseu);
1822                else if (IS_BROADWELL(dev_priv))
1823                        bdw_sseu_device_status(dev_priv, &sseu);
1824                else if (IS_GEN(dev_priv, 9))
1825                        gen9_sseu_device_status(dev_priv, &sseu);
1826                else if (INTEL_GEN(dev_priv) >= 10)
1827                        gen10_sseu_device_status(dev_priv, &sseu);
1828        }
1829
1830        i915_print_sseu_info(m, false, &sseu);
1831
1832        return 0;
1833}
1834
/*
 * Opening i915_forcewake_user takes a GT pm reference and, on gen6+,
 * a user forcewake reference, held until the file is released.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	/* Count the user-held wakeref before acquiring it. */
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}
1847
1848static int i915_forcewake_release(struct inode *inode, struct file *file)
1849{
1850        struct drm_i915_private *i915 = inode->i_private;
1851        struct intel_gt *gt = &i915->gt;
1852
1853        if (INTEL_GEN(i915) >= 6)
1854                intel_uncore_forcewake_user_put(&i915->uncore);
1855        intel_gt_pm_put(gt);
1856        atomic_dec(&gt->user_wakeref);
1857
1858        return 0;
1859}
1860
/* i915_forcewake_user: holds forcewake for as long as the file is open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
1866
/* Read-only seq_file entries, registered via drm_debugfs_create_files(). */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1886
/* Writable control files, each backed by dedicated file_operations. */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/* Error-capture entries exist only when the feature is compiled in. */
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};
1900
1901void i915_debugfs_register(struct drm_i915_private *dev_priv)
1902{
1903        struct drm_minor *minor = dev_priv->drm.primary;
1904        int i;
1905
1906        i915_debugfs_params(dev_priv);
1907
1908        debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
1909                            to_i915(minor->dev), &i915_forcewake_fops);
1910        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
1911                debugfs_create_file(i915_debugfs_files[i].name,
1912                                    S_IRUGO | S_IWUSR,
1913                                    minor->debugfs_root,
1914                                    to_i915(minor->dev),
1915                                    i915_debugfs_files[i].fops);
1916        }
1917
1918        drm_debugfs_create_files(i915_debugfs_list,
1919                                 I915_DEBUGFS_ENTRIES,
1920                                 minor->debugfs_root, minor);
1921}
1922