linux/drivers/gpu/drm/i915/intel_hangcheck.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

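/*
 * Decode the instruction captured in IPEHR: does the hung instruction
 * look like a semaphore wait? On gen8+ this is an MI_SEMAPHORE_WAIT
 * (opcode 0x1c); on earlier gens an MI_SEMAPHORE_MBOX register compare.
 */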
static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
        if (INTEL_GEN(engine->i915) >= 8) {
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
                return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
                                 MI_SEMAPHORE_REGISTER);
        }
}

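/*
 * Given a semaphore wait decoded from IPEHR (and, on gen8+, the GGTT
 * offset of the semaphore), find the engine that is expected to signal
 * it. Returns ERR_PTR(-ENODEV) if no matching signaller is found.
 */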
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                                 u64 offset)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *signaller;
        enum intel_engine_id id;

        if (INTEL_GEN(dev_priv) >= 8) {
                for_each_engine(signaller, dev_priv, id) {
                        if (engine == signaller)
                                continue;

                        if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
                                return signaller;
                }
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

                for_each_engine(signaller, dev_priv, id) {
                        if (engine == signaller)
                                continue;

                        if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
                                return signaller;
                }
        }

        DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
                         engine->name, ipehr, offset);

        return ERR_PTR(-ENODEV);
}

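/*
 * Scan the ringbuffer backwards from HEAD for the semaphore wait the
 * engine is currently blocked on, returning the engine expected to
 * signal it and, via @seqno, the value that would satisfy the wait.
 */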
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
        struct drm_i915_private *dev_priv = engine->i915;
        void __iomem *vaddr;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;

        /*
         * This function does not support execlist mode - any attempt to
         * proceed further into this function will result in a kernel panic
         * when dereferencing ring->buffer, which is not set up in execlist
         * mode.
         *
         * The correct way of doing it would be to derive the currently
         * executing ring buffer from the current context, which is derived
         * from the currently running request. Unfortunately, to get the
         * current request we would have to grab the struct_mutex before doing
         * anything else, which would be ill-advised since some other thread
         * might have grabbed it already and managed to hang itself, causing
         * the hang checker to deadlock.
         *
         * Therefore, this function does not support execlist mode in its
         * current form. Just return NULL and move on.
         */
        if (engine->buffer == NULL)
                return NULL;

        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
        if (!ipehr_is_semaphore_wait(engine, ipehr))
                return NULL;

        /*
         * HEAD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX. But limit it to just 3
         * or 4 dwords depending on the semaphore wait command size.
         * Note that we don't care about ACTHD here since that might
         * point at a batch, and semaphores are always emitted into the
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(engine) & HEAD_ADDR;
        backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
        vaddr = (void __iomem *)engine->buffer->vaddr;

        for (i = backwards; i; --i) {
                /*
                 * Be paranoid and presume the hw has gone off into the wild -
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
                head &= engine->buffer->size - 1;

                /* This here seems to blow up */
                cmd = ioread32(vaddr + head);
                if (cmd == ipehr)
                        break;

                head -= 4;
        }

        if (!i)
                return NULL;

        *seqno = ioread32(vaddr + head + 4) + 1;
        if (INTEL_GEN(dev_priv) >= 8) {
                offset = ioread32(vaddr + head + 12);
                offset <<= 32;
                offset |= ioread32(vaddr + head + 8);
        }
        return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}

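/*
 * Check whether the semaphore this engine is waiting on has been dealt
 * with: returns 1 if the signaller has already passed the awaited
 * seqno, 0 if it still appears to be making legitimate progress, and
 * -1 if the wait cannot be resolved or a deadlock is suspected.
 */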
static int semaphore_passed(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *signaller;
        u32 seqno;

        engine->hangcheck.deadlock++;

        signaller = semaphore_waits_for(engine, &seqno);
        if (signaller == NULL)
                return -1;

        if (IS_ERR(signaller))
                return 0;

        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;

        if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;

        /* cursory check for an unkickable deadlock */
        if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
            semaphore_passed(signaller) < 0)
                return -1;

        return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id)
                engine->hangcheck.deadlock = 0;
}

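/*
 * Accumulate INSTDONE bits over successive samples; returns true if
 * the current sample adds no bits beyond those already seen, i.e. no
 * fresh subunit progress.
 */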
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
        u32 tmp = current_instdone | *old_instdone;
        bool unchanged;

        unchanged = tmp == *old_instdone;
        *old_instdone |= tmp;

        return unchanged;
}

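/*
 * Check whether the render engine's subunits are still making progress
 * by accumulating undone -> done transitions in INSTDONE. Non-render
 * engines have no subunit state to inspect and are reported as stuck.
 */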
static bool subunits_stuck(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_instdone instdone;
        struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
        bool stuck;
        int slice;
        int subslice;

        if (engine->id != RCS)
                return true;

        intel_engine_get_instdone(engine, &instdone);

        /* There might be unstable subunit states even when
         * actual head is not moving. Filter out the unstable ones by
         * accumulating the undone -> done transitions and only
         * consider those as progress.
         */
        stuck = instdone_unchanged(instdone.instdone,
                                   &accu_instdone->instdone);
        stuck &= instdone_unchanged(instdone.slice_common,
                                    &accu_instdone->slice_common);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
                stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
                                            &accu_instdone->sampler[slice][subslice]);
                stuck &= instdone_unchanged(instdone.row[slice][subslice],
                                            &accu_instdone->row[slice][subslice]);
        }

        return stuck;
}

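/*
 * Classify an engine whose seqno has not advanced: the head may still
 * be moving (ENGINE_ACTIVE_HEAD), the subunits may still be making
 * progress (ENGINE_ACTIVE_SUBUNITS), or the engine is ENGINE_DEAD.
 */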
static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
        if (acthd != engine->hangcheck.acthd) {

                /* Clear subunit states on head movement */
                memset(&engine->hangcheck.instdone, 0,
                       sizeof(engine->hangcheck.instdone));

                return ENGINE_ACTIVE_HEAD;
        }

        if (!subunits_stuck(engine))
                return ENGINE_ACTIVE_SUBUNITS;

        return ENGINE_DEAD;
}

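/*
 * Decide the fate of an engine whose seqno is not advancing: if the
 * head or subunits are still active, report that; if it is stuck on a
 * WAIT_FOR_EVENT or on an already-signalled semaphore, kick it
 * (ENGINE_WAIT_KICK); if it is legitimately waiting on another engine,
 * report ENGINE_WAIT; otherwise declare it ENGINE_DEAD.
 */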
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
        struct drm_i915_private *dev_priv = engine->i915;
        enum intel_engine_hangcheck_action ha;
        u32 tmp;

        ha = head_stuck(engine, acthd);
        if (ha != ENGINE_DEAD)
                return ha;

        if (IS_GEN2(dev_priv))
                return ENGINE_DEAD;

        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
         * and break the hang. This should work on
         * all but the second generation chipsets.
         */
        tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
                i915_handle_error(dev_priv, 0,
                                  "Kicking stuck wait on %s",
                                  engine->name);
                I915_WRITE_CTL(engine, tmp);
                return ENGINE_WAIT_KICK;
        }

        if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(engine)) {
                default:
                        return ENGINE_DEAD;
                case 1:
                        i915_handle_error(dev_priv, 0,
                                          "Kicking stuck semaphore on %s",
                                          engine->name);
                        I915_WRITE_CTL(engine, tmp);
                        return ENGINE_WAIT_KICK;
                case 0:
                        return ENGINE_WAIT;
                }
        }

        return ENGINE_DEAD;
}

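/* Take a snapshot of the engine's current ACTHD and seqno. */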
static void hangcheck_load_sample(struct intel_engine_cs *engine,
                                  struct intel_engine_hangcheck *hc)
{
        /* We don't strictly need an irq-barrier here, as we are not
         * serving an interrupt request, but be paranoid in case the
         * barrier has side-effects (such as preventing a broken
         * cacheline snoop) and so make sure that we can see the seqno
         * advance. If the seqno should stick, due to a stale
         * cacheline, we would erroneously declare the GPU hung.
         */
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);

        hc->acthd = intel_engine_get_active_head(engine);
        hc->seqno = intel_engine_get_seqno(engine);
}

static void hangcheck_store_sample(struct intel_engine_cs *engine,
                                   const struct intel_engine_hangcheck *hc)
{
        engine->hangcheck.acthd = hc->acthd;
        engine->hangcheck.seqno = hc->seqno;
        engine->hangcheck.action = hc->action;
        engine->hangcheck.stalled = hc->stalled;
}

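/*
 * Map the freshly loaded sample to a hangcheck action: the seqno has
 * moved, the engine is idle, or we need to look closer at the head and
 * subunits.
 */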
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
                     const struct intel_engine_hangcheck *hc)
{
        if (engine->hangcheck.seqno != hc->seqno)
                return ENGINE_ACTIVE_SEQNO;

        if (intel_engine_is_idle(engine))
                return ENGINE_IDLE;

        return engine_stuck(engine, hc->acthd);
}

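/*
 * Fold the new sample into the engine's hangcheck state and decide
 * whether the engine has now stalled for longer than the allowed
 * timeout.
 */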
static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
                                        struct intel_engine_hangcheck *hc)
{
        unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;

        hc->action = hangcheck_get_action(engine, hc);

        /* We always count towards a hang if the engine is busy and
         * still processing the same request, so that no single request
         * can run indefinitely (such as a chain of batches). The only
         * time we do not count towards a hang on this engine is when
         * it is in a legitimate wait for another engine. In that case
         * the waiting engine is a victim and we want to be sure we
         * catch the right culprit. Whenever we do kick the ring, treat
         * that as progress, since the kick may allow the seqno to
         * advance; if it does not, the hanging engine will still be
         * caught.
         */

        switch (hc->action) {
        case ENGINE_IDLE:
        case ENGINE_ACTIVE_SEQNO:
                /* Clear head and subunit states on seqno movement */
                hc->acthd = 0;

                memset(&engine->hangcheck.instdone, 0,
                       sizeof(engine->hangcheck.instdone));

                /* Intentional fall through */
        case ENGINE_WAIT_KICK:
        case ENGINE_WAIT:
                engine->hangcheck.action_timestamp = jiffies;
                break;

        case ENGINE_ACTIVE_HEAD:
        case ENGINE_ACTIVE_SUBUNITS:
                /* Seqno stuck with still active engine gets leeway,
                 * in hopes that it is just a long shader.
                 */
                timeout = I915_SEQNO_DEAD_TIMEOUT;
                break;

        case ENGINE_DEAD:
                break;

        default:
                MISSING_CASE(hc->action);
        }

        hc->stalled = time_after(jiffies,
                                 engine->hangcheck.action_timestamp + timeout);
}

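/*
 * Build a short synopsis naming the engines at fault and pass the hang
 * on to i915_handle_error().
 */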
static void hangcheck_declare_hang(struct drm_i915_private *i915,
                                   unsigned int hung,
                                   unsigned int stuck)
{
        struct intel_engine_cs *engine;
        char msg[80];
        unsigned int tmp;
        int len;

        /* If some rings hung but others were still busy, only
         * blame the hanging rings in the synopsis.
         */
        if (stuck != hung)
                hung &= ~stuck;
        len = scnprintf(msg, sizeof(msg),
                        "%s on ", stuck == hung ? "No progress" : "Hang");
        for_each_engine_masked(engine, i915, hung, tmp)
                len += scnprintf(msg + len, sizeof(msg) - len,
                                 "%s, ", engine->name);
        msg[len-2] = '\0';

        return i915_handle_error(i915, hung, "%s", msg);
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-engine seqno
 * progress and, if there is no progress, acthd is inspected to see
 * whether the engine is stuck. If it is merely waiting, we kick the
 * engine. If an engine makes no progress for long enough, we assume
 * it has hung and try to fix it by resetting the GPU.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int hung = 0, stuck = 0;
        int busy_count = 0;

        if (!i915.enable_hangcheck)
                return;

        if (!READ_ONCE(dev_priv->gt.awake))
                return;

        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return;

        /* As enabling the GPU requires fairly extensive mmio access,
         * periodically arm the mmio checker to see if we are triggering
         * any invalid access.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                struct intel_engine_hangcheck cur_state, *hc = &cur_state;
                const bool busy = intel_engine_has_waiter(engine);

                semaphore_clear_deadlocks(dev_priv);

                hangcheck_load_sample(engine, hc);
                hangcheck_accumulate_sample(engine, hc);
                hangcheck_store_sample(engine, hc);

                if (engine->hangcheck.stalled) {
                        hung |= intel_engine_flag(engine);
                        if (hc->action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }

                busy_count += busy;
        }

        if (hung)
                hangcheck_declare_hang(dev_priv, hung, stuck);

        /* Reset timer in case GPU hangs without another request being added */
        if (busy_count)
                i915_queue_hangcheck(dev_priv);
}

void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
        memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
}

void intel_hangcheck_init(struct drm_i915_private *i915)
{
        INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_hangcheck.c"
#endif