linux/drivers/gpu/drm/i915/selftests/intel_uncore.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

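/*
 * Sanity check a forcewake range table: entries must be sorted in
 * ascending order, must not overlap and must not be empty; for a
 * "watertight" table each range must also begin immediately after the
 * previous one ends, leaving no uncovered offsets.
 */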
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
                                unsigned int num_ranges,
                                bool is_watertight)
{
        unsigned int i;
        s32 prev;

        for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
                /* Check that the table is watertight */
                if (is_watertight && (prev + 1) != (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the table never goes backwards */
                if (prev >= (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the entry is valid */
                if (ranges->start >= ranges->end) {
                        pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
                               __func__, i, ranges->start, ranges->end);
                        return -EINVAL;
                }

                prev = ranges->end;
        }

        return 0;
}

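/*
 * Validate the per-generation shadowed register tables: each range must
 * be well formed (end not before start), sorted after the previous entry
 * without overlapping it, and start on a dword boundary.
 */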
static int intel_shadow_table_check(void)
{
        struct {
                const struct i915_range *regs;
                unsigned int size;
        } range_lists[] = {
                { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
                { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
                { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
                { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
        };
        const struct i915_range *range;
        unsigned int i, j;
        s32 prev;

        for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
                range = range_lists[j].regs;
                for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
                        if (range->end < range->start) {
                                pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
                                       __func__, i, range->start, range->end);
                                return -EINVAL;
                        }

                        if (prev >= (s32)range->start) {
                                pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
                                       __func__, i, range->start, range->end, prev);
                                return -EINVAL;
                        }

                        if (range->start % 4) {
                                pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
                                       __func__, i, range->start, range->end);
                                return -EINVAL;
                        }

                        prev = range->end;
                }
        }

        return 0;
}

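/*
 * Mock selftest: verify the static forcewake range tables and shadowed
 * register tables are internally consistent, without touching hardware.
 * The gen9 and later forcewake tables are expected to be watertight.
 */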
int intel_uncore_mock_selftests(void)
{
        struct {
                const struct intel_forcewake_range *ranges;
                unsigned int num_ranges;
                bool is_watertight;
        } fw[] = {
                { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
                { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
                { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
                { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
                { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
                { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
        };
        int err, i;

        for (i = 0; i < ARRAY_SIZE(fw); i++) {
                err = intel_fw_table_check(fw[i].ranges,
                                           fw[i].num_ranges,
                                           fw[i].is_watertight);
                if (err)
                        return err;
        }

        err = intel_shadow_table_check();
        if (err)
                return err;

        return 0;
}

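/*
 * Live selftest: pick a register that needs forcewake for reads on this
 * platform, take and release the required forcewake domains around a
 * read on each engine, and check that the register reads back non-zero
 * while forcewake is held but decays to zero once every domain has been
 * released and its ack has cleared.
 */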
static int live_forcewake_ops(void *arg)
{
        static const struct reg {
                const char *name;
                u8 min_graphics_ver;
                u8 max_graphics_ver;
                unsigned int offset;
        } registers[] = {
                {
                        "RING_START",
                        6, 7,
                        0x38,
                },
                {
                        "RING_MI_MODE",
                        8, U8_MAX,
                        0x9c,
                },
                {}, /* terminator for the r->name lookup below */
        };
        const struct reg *r;
        struct intel_gt *gt = arg;
        struct intel_uncore_forcewake_domain *domain;
        struct intel_uncore *uncore = gt->uncore;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        unsigned int tmp;
        int err = 0;

        GEM_BUG_ON(gt->awake);

        /* vlv/chv with their pcu behave differently wrt reads */
        if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
                pr_debug("PCU fakes forcewake badly; skipping\n");
                return 0;
        }

        /*
         * Not quite as reliable across the gen as one would hope.
         *
         * Either our theory of operation is incorrect, or there remain
         * external parties interfering with the powerwells.
         *
         * https://bugs.freedesktop.org/show_bug.cgi?id=110210
         */
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;

        /* We have to pick carefully to get the exact behaviour we need */
        for (r = registers; r->name; r++)
                if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
                        break;
        if (!r->name) {
                pr_debug("Forcewaked register not known for %s; skipping\n",
                         intel_platform_name(INTEL_INFO(gt->i915)->platform));
                return 0;
        }

        wakeref = intel_runtime_pm_get(uncore->rpm);

        for_each_fw_domain(domain, uncore, tmp) {
                smp_store_mb(domain->active, false);
                if (!hrtimer_cancel(&domain->timer))
                        continue;

                intel_uncore_fw_release_timer(&domain->timer);
        }

        for_each_engine(engine, gt, id) {
                i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
                u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
                enum forcewake_domains fw_domains;
                u32 val;

                if (!engine->default_state)
                        continue;

                fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
                                                            FW_REG_READ);
                if (!fw_domains)
                        continue;

                for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                        if (!domain->wake_count)
                                continue;

                        pr_err("fw_domain %s still active, aborting test!\n",
                               intel_uncore_forcewake_domain_to_str(domain->id));
                        err = -EINVAL;
                        goto out_rpm;
                }

                intel_uncore_forcewake_get(uncore, fw_domains);
                val = readl(reg);
                intel_uncore_forcewake_put(uncore, fw_domains);

                /* Flush the forcewake release (delayed onto a timer) */
                for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                        smp_store_mb(domain->active, false);
                        if (hrtimer_cancel(&domain->timer))
                                intel_uncore_fw_release_timer(&domain->timer);

                        preempt_disable();
                        err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
                        preempt_enable();
                        if (err) {
                                pr_err("Failed to clear fw_domain %s\n",
                                       intel_uncore_forcewake_domain_to_str(domain->id));
                                goto out_rpm;
                        }
                }

                if (!val) {
                        pr_err("%s:%s was zero while fw was held!\n",
                               engine->name, r->name);
                        err = -EINVAL;
                        goto out_rpm;
                }

                /* We then expect the read to return 0 outside of the fw */
                if (wait_for(readl(reg) == 0, 100)) {
                        pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
                               engine->name, r->name, readl(reg), fw_domains);
                        err = -ETIMEDOUT;
                        goto out_rpm;
                }
        }

out_rpm:
        intel_runtime_pm_put(uncore->rpm, wakeref);
        return err;
}

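/*
 * Live selftest: with all forcewake domains held, probe the low mmio
 * range and record which offsets can be read without raising an
 * unclaimed-access error; then re-read those offsets after a full
 * forcewake reset and flag any that now report unclaimed mmio.
 */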
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
        struct intel_gt *gt = arg;
        struct intel_uncore *uncore = gt->uncore;
        unsigned long *valid;
        u32 offset;
        int err;

        if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
            !IS_VALLEYVIEW(gt->i915) &&
            !IS_CHERRYVIEW(gt->i915))
                return 0;

        /*
         * This test may lock up the machine or cause GPU hangs afterwards.
         */
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;

        valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
        if (!valid)
                return -ENOMEM;

        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

        check_for_unclaimed_mmio(uncore);
        for (offset = 0; offset < FW_RANGE; offset += 4) {
                i915_reg_t reg = { offset };

                intel_uncore_posting_read_fw(uncore, reg);
                if (!check_for_unclaimed_mmio(uncore))
                        set_bit(offset, valid);
        }

        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

        err = 0;
        for_each_set_bit(offset, valid, FW_RANGE) {
                i915_reg_t reg = { offset };

                iosf_mbi_punit_acquire();
                intel_uncore_forcewake_reset(uncore);
                iosf_mbi_punit_release();

                check_for_unclaimed_mmio(uncore);

                intel_uncore_posting_read_fw(uncore, reg);
                if (check_for_unclaimed_mmio(uncore)) {
                        pr_err("Unclaimed mmio read to register 0x%04x\n",
                               offset);
                        err = -EINVAL;
                }
        }

        bitmap_free(valid);
        return err;
}

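/*
 * Live selftest: re-run the range table checks on the forcewake table
 * actually selected for this device; gen9+ tables must be watertight.
 */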
static int live_fw_table(void *arg)
{
        struct intel_gt *gt = arg;

        /* Confirm the table we load is still valid */
        return intel_fw_table_check(gt->uncore->fw_domains_table,
                                    gt->uncore->fw_domains_table_entries,
                                    GRAPHICS_VER(gt->i915) >= 9);
}

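/*
 * Entry point for the live uncore selftests; runs each subtest against
 * the device's primary GT.
 */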
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_fw_table),
                SUBTEST(live_forcewake_ops),
                SUBTEST(live_forcewake_domains),
        };

        return intel_gt_live_subtests(tests, to_gt(i915));
}