linux/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
        struct dma_fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        rcu_barrier();
        kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass to the subsequent fence emission
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
                      unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;
        struct dma_fence *old, **ptr;
        uint32_t seq;

        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        seq = ++ring->fence_drv.sync_seq;
        fence->ring = ring;
        dma_fence_init(&fence->base, &amdgpu_fence_ops,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx,
                       seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
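
        /* The fence array is a power-of-two ring buffer indexed by
         * seq & num_fences_mask; by the time a sequence number wraps
         * back onto a slot, the fence previously stored there should
         * normally have signaled already, otherwise we wait for it below.
         */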
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        old = rcu_dereference_protected(*ptr, 1);
        if (old && !dma_fence_is_signaled(old)) {
                DRM_INFO("rcu slot is busy\n");
                dma_fence_wait(old, false);
        }

        rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

        *f = &fence->base;

        return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
        uint32_t seq;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value.  Signals all fences emitted up to
 * and including that value.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        uint32_t seq, last_seq;
        int r;

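        /* Atomically advance last_seq to the value just read from the
         * fence memory; retry if an interrupt handler or another thread
         * updated last_seq while we were reading.
         */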
        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

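        /* Walk every slot between the previously signaled sequence number
         * and the one just read, signaling and dropping each fence found.
         */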
        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                r = dma_fence_signal(fence);
                if (!r)
                        DMA_FENCE_TRACE(fence, "signaled from irq context\n");
                else
                        BUG();

                dma_fence_put(fence);
        } while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = from_timer(ring, t,
                                              fence_drv.fallback_timer);

        amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

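        /* Fences on a ring signal in order, so waiting for the last
         * emitted fence (the slot selected by sync_seq) is enough to
         * know the ring is idle.
         */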
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait for the given sequence number to be signaled on the
 * requested ring (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        uint32_t seq;

        do {
                seq = amdgpu_fence_read(ring);
                udelay(5);
                timeout -= 5;
        } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

        return timeout > 0 ? timeout : 0;
}
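
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a caller might pair amdgpu_fence_emit_polling() with
 * amdgpu_fence_wait_polling(), e.g. to wait for a small command sequence
 * without relying on interrupts.  Ring allocation size, locking and
 * error handling here are assumptions of this sketch, not requirements
 * of the API.
 *
 *	uint32_t seq;
 *	signed long left;
 *
 *	r = amdgpu_ring_alloc(ring, 32);
 *	if (r)
 *		return r;
 *	// ... emit commands on the ring ...
 *	amdgpu_fence_emit_polling(ring, &seq);
 *	amdgpu_ring_commit(ring);
 *
 *	left = amdgpu_fence_wait_polling(ring, seq, timeout);
 *	if (!left)
 *		// timed out, fall back to reset/recovery
 */
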
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
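        /* Both counters are 32 bit and may have wrapped; doing the
         * subtraction in 64 bit with a 2^32 bias and keeping only the
         * lower 32 bits yields sync_seq - last_seq modulo 2^32, i.e. the
         * number of emitted but not yet signaled fences.
         */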
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
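                /* regular rings keep the fence value in a 4-byte
                 * writeback (WB) slot that is visible to both CPU and GPU
                 */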
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                "cpu addr 0x%p\n", ring->idx,
                ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission)
{
        long timeout;
        int r;

        /* Check that num_hw_submission is a power of two */
        if ((num_hw_submission & (num_hw_submission - 1)) != 0)
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

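        /* num_hw_submission is a power of two, so twice that is as well;
         * the fences array therefore has a power-of-two size and
         * num_fences_mask gives cheap "modulo" indexing by sequence number.
         */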
        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);
        if (!ring->fence_drv.fences)
                return -ENOMEM;

        /* No need to setup the GPU scheduler for KIQ ring */
        if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
                /* for the non-SR-IOV case, no timeout is enforced on compute rings */
                if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                                && !amdgpu_sriov_vf(ring->adev))
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   num_hw_submission, amdgpu_job_hang_limit,
                                   timeout, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
                        return r;
                }
        }

        return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        unsigned i, j;
        int r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(ring);
                }
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                drm_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(ring);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signaled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held.  It arms the
 * fallback timer so the fence still gets signaled even if the
 * fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);

        DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback to free the fence
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted      0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset          0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both           0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset and recovery when this debugfs file is read.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        seq_printf(m, "gpu recover\n");
        amdgpu_device_gpu_recover(adev, NULL, true);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
        {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        if (amdgpu_sriov_vf(adev))
                return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
        return 0;
#endif
}