linux/drivers/gpu/drm/i915/selftests/intel_uncore.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

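/*
 * Validate a forcewake range table: entries must be sorted in ascending
 * order of offset and each entry must end after it starts.  For tables
 * that are expected to be watertight, every entry must also begin
 * immediately after the previous one ends, starting from offset 0.
 */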
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		if (is_watertight && (prev + 1) != (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the table never goes backwards */
		if (prev >= (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the entry is valid */
		if (ranges->start >= ranges->end) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}

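/*
 * Check that every per-gen list of shadowed registers is sorted in
 * strictly ascending offset order with no duplicate entries.
 */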
static int intel_shadow_table_check(void)
{
	struct {
		const i915_reg_t *regs;
		unsigned int size;
	} reg_lists[] = {
		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
		{ xehp_shadowed_regs, ARRAY_SIZE(xehp_shadowed_regs) },
	};
	const i915_reg_t *reg;
	unsigned int i, j;
	s32 prev;

	for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
		reg = reg_lists[j].regs;
		for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
			u32 offset = i915_mmio_reg_offset(*reg);

			if (prev >= (s32)offset) {
				pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
				       __func__, i, offset, prev);
				return -EINVAL;
			}

			prev = offset;
		}
	}

	return 0;
}

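/*
 * Mock selftests: sanity check the static forcewake range tables and the
 * shadowed register lists without touching any hardware.  The gen9+ range
 * tables are expected to be watertight; vlv/chv are not.
 */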
int intel_uncore_mock_selftests(void)
{
	struct {
		const struct intel_forcewake_range *ranges;
		unsigned int num_ranges;
		bool is_watertight;
	} fw[] = {
		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
		{ __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
	};
	int err, i;

	for (i = 0; i < ARRAY_SIZE(fw); i++) {
		err = intel_fw_table_check(fw[i].ranges,
					   fw[i].num_ranges,
					   fw[i].is_watertight);
		if (err)
			return err;
	}

	err = intel_shadow_table_check();
	if (err)
		return err;

	return 0;
}

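/*
 * live_forcewake_ops: pick a per-engine register that reads back as zero
 * unless its forcewake domain is held (RING_START on gen6/7, RING_MI_MODE
 * on gen8+).  Read it with forcewake explicitly held and expect a non-zero
 * value, then release forcewake and expect the read to decay back to zero
 * once the domain has been put to sleep.
 */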
static int live_forcewake_ops(void *arg)
{
	static const struct reg {
		const char *name;
		u8 min_graphics_ver;
		u8 max_graphics_ver;
		unsigned int offset;
	} registers[] = {
		{
			"RING_START",
			6, 7,
			0x38,
		},
		{
			"RING_MI_MODE",
			8, U8_MAX,
			0x9c,
		},
		{ } /* sentinel for the name-terminated lookup loop below */
	};
	const struct reg *r;
	struct intel_gt *gt = arg;
	struct intel_uncore_forcewake_domain *domain;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	int err = 0;

	GEM_BUG_ON(gt->awake);

	/* vlv/chv with their pcu behave differently wrt reads */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
		pr_debug("PCU fakes forcewake badly; skipping\n");
		return 0;
	}

	/*
	 * Not quite as reliable across the gen as one would hope.
	 *
	 * Either our theory of operation is incorrect, or there remain
	 * external parties interfering with the powerwells.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* We have to pick carefully to get the exact behaviour we need */
	for (r = registers; r->name; r++)
		if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
			break;
	if (!r->name) {
		pr_debug("Forcewaked register not known for %s; skipping\n",
			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
		return 0;
	}

	wakeref = intel_runtime_pm_get(uncore->rpm);

	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
		if (!hrtimer_cancel(&domain->timer))
			continue;

		intel_uncore_fw_release_timer(&domain->timer);
	}

	for_each_engine(engine, gt, id) {
		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
		u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
		enum forcewake_domains fw_domains;
		u32 val;

		if (!engine->default_state)
			continue;

		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
							    FW_REG_READ);
		if (!fw_domains)
			continue;

		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			if (!domain->wake_count)
				continue;

			pr_err("fw_domain %s still active, aborting test!\n",
			       intel_uncore_forcewake_domain_to_str(domain->id));
			err = -EINVAL;
			goto out_rpm;
		}

		intel_uncore_forcewake_get(uncore, fw_domains);
		val = readl(reg);
		intel_uncore_forcewake_put(uncore, fw_domains);

		/* Flush the forcewake release (delayed onto a timer) */
		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer))
				intel_uncore_fw_release_timer(&domain->timer);

			preempt_disable();
			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
			preempt_enable();
			if (err) {
				pr_err("Failed to clear fw_domain %s\n",
				       intel_uncore_forcewake_domain_to_str(domain->id));
				goto out_rpm;
			}
		}

		if (!val) {
			pr_err("%s:%s was zero while fw was held!\n",
			       engine->name, r->name);
			err = -EINVAL;
			goto out_rpm;
		}

		/* We then expect the read to return 0 outside of the fw */
		if (wait_for(readl(reg) == 0, 100)) {
			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
			       engine->name, r->name, readl(reg), fw_domains);
			err = -ETIMEDOUT;
			goto out_rpm;
		}
	}

out_rpm:
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return err;
}

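/*
 * live_forcewake_domains: with every forcewake domain held, probe each
 * register offset below FW_RANGE and record which ones can be read without
 * tripping the unclaimed-mmio detection; then drop all forcewake and
 * re-read each recorded offset, reporting any access that has now become
 * unclaimed.
 */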
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;
	u32 offset;
	int err;

	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lock up the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		check_for_unclaimed_mmio(uncore);

		intel_uncore_posting_read_fw(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}

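/*
 * live_fw_table: re-run the range-table checker against the forcewake
 * table actually selected for this device; tables for gen9 and later are
 * expected to be watertight.
 */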
static int live_fw_table(void *arg)
{
	struct intel_gt *gt = arg;

	/* Confirm the table we load is still valid */
	return intel_fw_table_check(gt->uncore->fw_domains_table,
				    gt->uncore->fw_domains_table_entries,
				    GRAPHICS_VER(gt->i915) >= 9);
}

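/*
 * Entry point for the live (hardware-backed) uncore selftests.
 */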
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_fw_table),
		SUBTEST(live_forcewake_ops),
		SUBTEST(live_forcewake_domains),
	};

	return intel_gt_live_subtests(tests, &i915->gt);
}