linux/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
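
/*
 * Typical flow (illustrative overview): the submission path calls
 * amdgpu_fence_emit() after writing its commands to the ring, the fence
 * interrupt handler (or the fallback timer) calls amdgpu_fence_process()
 * to signal completed fences, and CPU-side users block on the returned
 * dma_fence with dma_fence_wait() until it signals.
 */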

struct amdgpu_fence {
        struct dma_fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        /* Wait for in-flight amdgpu_fence_free() RCU callbacks before
         * destroying the slab they free into.
         */
        rcu_barrier();
        kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
                      unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;

        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        seq = ++ring->fence_drv.sync_seq;
        fence->ring = ring;
        dma_fence_init(&fence->base, &amdgpu_fence_ops,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx,
                       seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev->ddev->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        /* The fences array wraps: if the slot for this seq still holds an
         * older fence, wait for it to signal before reusing the slot.
         */
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        r = dma_fence_wait(old, false);
                        dma_fence_put(old);
                        if (r)
                                return r;
                }
        }

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

        *f = &fence->base;

        return 0;
}
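
/*
 * Example (illustrative sketch, simplified from real submission code such
 * as amdgpu_ib_schedule()): emit a fence for the commands just written and
 * hand the resulting dma_fence to the caller, undoing the ring on failure.
 *
 *      struct dma_fence *fence;
 *      int r;
 *
 *      r = amdgpu_fence_emit(ring, &fence, 0);
 *      if (r) {
 *              amdgpu_ring_undo(ring);
 *              return r;
 *      }
 *      amdgpu_ring_commit(ring);
 */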

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics), without
 * requesting an interrupt; the caller is expected to poll for the
 * sequence number.
 * Returns 0 on success, -EINVAL if no sequence pointer was given.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
        uint32_t seq;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}
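
/*
 * Example (illustrative): polling fences are used on rings that run
 * without fence interrupts, e.g. the KIQ ring. A caller emits a sequence
 * number and then busy-waits for it:
 *
 *      uint32_t seq;
 *
 *      if (!amdgpu_fence_emit_polling(ring, &seq)) {
 *              amdgpu_ring_commit(ring);
 *              amdgpu_fence_wait_polling(ring, seq, timeout);
 *      }
 */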

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;
        int r;

        /* Atomically publish the newest fence value we have read; retry
         * if a concurrent caller raced with us.
         */
        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (del_timer(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                r = dma_fence_signal(fence);
                if (!r)
                        DMA_FENCE_TRACE(fence, "signaled from irq context\n");
                else
                        BUG();

                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev->ddev->dev);
                pm_runtime_put_autosuspend(adev->ddev->dev);
        } while (last_seq != seq);

        return true;
}
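
/*
 * amdgpu_fence_process() is called both from the ring's fence interrupt
 * handler and from the fallback timer below; the atomic_cmpxchg() loop
 * above makes those callers safe against each other.
 */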

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = from_timer(ring, t,
                                              fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the fence with the given sequence number has signaled
 * (all asics).
 * Returns the remaining timeout if the fence signaled in time, 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        uint32_t seq;

        do {
                seq = amdgpu_fence_read(ring);
                udelay(5);
                timeout -= 5;
        } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

        return timeout > 0 ? timeout : 0;
}
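
/*
 * Note on the loop condition above: sequence numbers are 32 bits and wrap,
 * so "(int32_t)(wait_seq - seq) > 0" compares them modulo 2^32. For
 * example, wait_seq = 0x00000002 with seq = 0xfffffffe gives a difference
 * of 4 (> 0, keep waiting) even though wait_seq is numerically smaller.
 */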

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}
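
/*
 * Worked example for the arithmetic above: if the 32-bit sequence has
 * wrapped so that sync_seq = 2 while last_seq = 0xfffffffe, then
 * emitted = 0x100000000 - 0xfffffffe + 2 = 4, i.e. fences 0xffffffff,
 * 0x0, 0x1 and 0x2 are still outstanding.
 */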

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
                      "0x%016llx, cpu addr 0x%p\n", ring->name,
                      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission)
{
        struct amdgpu_device *adev = ring->adev;
        long timeout;
        int r;

        if (!adev)
                return -EINVAL;

        /* Check that num_hw_submission is a power of two */
        if ((num_hw_submission & (num_hw_submission - 1)) != 0)
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);
        if (!ring->fence_drv.fences)
                return -ENOMEM;

        /* No need to setup the GPU scheduler for KIQ ring */
        if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
                switch (ring->funcs->type) {
                case AMDGPU_RING_TYPE_GFX:
                        timeout = adev->gfx_timeout;
                        break;
                case AMDGPU_RING_TYPE_COMPUTE:
                        timeout = adev->compute_timeout;
                        break;
                case AMDGPU_RING_TYPE_SDMA:
                        timeout = adev->sdma_timeout;
                        break;
                default:
                        timeout = adev->video_timeout;
                        break;
                }

                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   num_hw_submission, amdgpu_job_hang_limit,
                                   timeout, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
                        return r;
                }
        }

        return 0;
}
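
/*
 * Sizing note: the fences array has num_hw_submission * 2 entries, so
 * amdgpu_fence_emit() can map a sequence number to a slot with the cheap
 * power-of-two modulo "seq & num_fences_mask" while keeping slack for
 * fences that are emitted but not yet signaled; hence the requirement
 * above that num_hw_submission be a power of two.
 */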

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        unsigned i, j;
        int r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(ring);
                }
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                drm_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(ring);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signaled
 *
 * Writes the current sync_seq as the fence value and processes the ring,
 * signaling all outstanding fences.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held when the first waiter
 * arrives; it arms the fallback timer so that the fence is guaranteed to
 * signal even if a fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);

        DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};
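
/*
 * The ops above plug amdgpu fences into the generic dma_fence framework:
 * .enable_signaling arms the fallback timer once someone actually waits,
 * and .release defers freeing by one RCU grace period so that lockless
 * readers such as dma_fence_get_rcu_safe() never touch freed memory.
 */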

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence          0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted                 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted                 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted               0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset                   0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both                    0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0)
                return 0;

        seq_printf(m, "gpu recover\n");
        amdgpu_device_gpu_recover(adev, NULL);

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
        {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        if (amdgpu_sriov_vf(adev))
                return amdgpu_debugfs_add_files(adev,
                                                amdgpu_debugfs_fence_list_sriov,
                                                ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
                                        ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
        return 0;
#endif
}