linux/drivers/gpu/drm/amd/amdgpu/si_dma.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        DMA0_REGISTER_OFFSET,
        DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

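/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Return the current hardware read pointer, taken from the
 * write-back slot the engine updates in system memory (SI).
 */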
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
        return ring->adev->wb.wb[ring->rptr_offs>>2];
}

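/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Read the current hardware write pointer from the DMA_RB_WPTR
 * register of the owning instance (SI).
 */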
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

        return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

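/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the software write pointer to the DMA_RB_WPTR register
 * so the engine starts fetching the new commands (SI).
 */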
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

        WREG32(DMA_RB_WPTR + sdma_offsets[me],
               (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

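/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve the VMID from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an indirect buffer on the DMA ring (SI).
 */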
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
                                uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* The indirect buffer packet must end on an 8 DW boundary in the
         * DMA ring.  Pad as necessary with NOPs.
         */
        while ((lower_32_bits(ring->wptr) & 7) != 5)
                amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
        amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
        amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_64BIT selects a 64 bit fence)
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * followed by a DMA trap packet to generate an interrupt if
 * needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                   unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

        /* write the fence */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
        amdgpu_ring_write(ring, seq);
        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
                amdgpu_ring_write(ring, addr & 0xfffffffc);
                amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }
        /* generate an interrupt */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

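/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Disable the ring buffer of each DMA instance and, if the instance
 * backs the TTM buffer functions, mark those as unavailable (SI).
 */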
static void si_dma_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl;
        unsigned i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                /* disable the ring buffer */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~DMA_RB_ENABLE;
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
        }
}

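/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Program the ring buffer registers, enable read pointer write-back
 * and DMA IBs for each instance, then test the rings (SI).
 * Returns 0 for success, error for failure.
 */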
static int si_dma_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
        int i, r;
        uint64_t rptr_addr;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;

                WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
                WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

                rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

                WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
                WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

                rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

                WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

                /* enable DMA IBs */
                ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
                ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
                WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

                dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
                WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

                ring->wptr = 0;
                WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

                ring->sched.ready = true;

                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }

        return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 4);
        if (r)
                goto error_free_wb;

        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_wb:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout in jiffies to wait for the IB fence
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256,
                          AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;

        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
                               uint64_t pe, uint64_t src,
                               unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, bytes);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                uint64_t value, unsigned count,
                                uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
                                  uint64_t pe,
                                  uint64_t addr, unsigned count,
                                  uint32_t incr, uint64_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                if (flags & AMDGPU_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
                ib->ptr[ib->length_dw++] = upper_32_bits(flags);
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                pe += ndw * 4;
                addr += (ndw / 2) * incr;
                count -= ndw / 2;
        }
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
                          (1 << 27)); /* Poll memory */
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, seq); /* value */
        amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VMID to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                      unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for invalidate to complete */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
        amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0xff << 16); /* retry */
        amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, 0); /* value */
        amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

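/**
 * si_dma_ring_emit_wreg - emit a register write via SRBM
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM write packet so the DMA engine writes a register
 * on behalf of the caller (SI).
 */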
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
                                  uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        amdgpu_ring_write(ring, (0xf << 16) | reg);
        amdgpu_ring_write(ring, val);
}

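/**
 * si_dma_early_init - set the instance count and callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * SI parts have two DMA instances; register the ring, buffer,
 * VM PTE and interrupt callbacks for them.
 */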
static int si_dma_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->sdma.num_instances = 2;

        si_dma_set_ring_funcs(adev);
        si_dma_set_buffer_funcs(adev);
        si_dma_set_vm_pte_funcs(adev);
        si_dma_set_irq_funcs(adev);

        return 0;
}

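/**
 * si_dma_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the trap interrupt sources for both instances and
 * initialize their rings.
 * Returns 0 for success, error for failure.
 */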
static int si_dma_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* DMA0 trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* DMA1 trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
                                     AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }

        return r;
}

static int si_dma_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        return 0;
}

static int si_dma_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        si_dma_stop(adev);

        return 0;
}

static int si_dma_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(SRBM_STATUS2);

        if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
                return false;

        return true;
}

static int si_dma_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (si_dma_is_idle(handle))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
        DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
        return 0;
}

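/**
 * si_dma_set_trap_irq_state - enable/disable the trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source structure
 * @type: DMA instance the state applies to
 * @state: interrupt state to set
 *
 * Toggle the TRAP_ENABLE bit in the DMA_CNTL register of the
 * selected instance (SI).
 */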
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *src,
                                     unsigned type,
                                     enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_INSTANCE0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
                        sdma_cntl &= ~TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
                        sdma_cntl |= TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_INSTANCE1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
                        sdma_cntl &= ~TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
                        sdma_cntl |= TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

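/**
 * si_dma_process_trap_irq - process a trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source structure
 * @entry: decoded interrupt vector entry
 *
 * Run fence processing for the instance that raised the trap;
 * src_id 224 belongs to DMA0 and 244 to DMA1 (SI).
 */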
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        if (entry->src_id == 224)
                amdgpu_fence_process(&adev->sdma.instance[0].ring);
        else
                amdgpu_fence_process(&adev->sdma.instance[1].ring);
        return 0;
}

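/**
 * si_dma_set_clockgating_state - enable/disable clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Toggle the MEM_POWER_OVERRIDE bit and program DMA_CLK_CTRL for
 * each instance, depending on whether medium grain clock gating
 * is being enabled or disabled (SI).
 */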
static int si_dma_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        u32 orig, data, offset;
        int i;
        bool enable;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        enable = (state == AMD_CG_STATE_GATE);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        if (i == 0)
                                offset = DMA0_REGISTER_OFFSET;
                        else
                                offset = DMA1_REGISTER_OFFSET;
                        orig = data = RREG32(DMA_POWER_CNTL + offset);
                        data &= ~MEM_POWER_OVERRIDE;
                        if (data != orig)
                                WREG32(DMA_POWER_CNTL + offset, data);
                        WREG32(DMA_CLK_CTRL + offset, 0x00000100);
                }
        } else {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        if (i == 0)
                                offset = DMA0_REGISTER_OFFSET;
                        else
                                offset = DMA1_REGISTER_OFFSET;
                        orig = data = RREG32(DMA_POWER_CNTL + offset);
                        data |= MEM_POWER_OVERRIDE;
                        if (data != orig)
                                WREG32(DMA_POWER_CNTL + offset, data);

                        orig = data = RREG32(DMA_CLK_CTRL + offset);
                        data = 0xff000000;
                        if (data != orig)
                                WREG32(DMA_CLK_CTRL + offset, data);
                }
        }

        return 0;
}

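/**
 * si_dma_set_powergating_state - program the DMA power gating FSM
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state (unused here)
 *
 * Issue the fixed DMA_PGFSM_WRITE/DMA_PGFSM_CONFIG register
 * sequence below; the magic values are carried over from the
 * original driver and are not otherwise documented here (SI).
 */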
static int si_dma_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        u32 tmp;

        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32(DMA_PGFSM_WRITE,  0x00002000);
        WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

        for (tmp = 0; tmp < 5; tmp++)
                WREG32(DMA_PGFSM_WRITE, 0);

        return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
        .name = "si_dma",
        .early_init = si_dma_early_init,
        .late_init = NULL,
        .sw_init = si_dma_sw_init,
        .sw_fini = si_dma_sw_fini,
        .hw_init = si_dma_hw_init,
        .hw_fini = si_dma_hw_fini,
        .suspend = si_dma_suspend,
        .resume = si_dma_resume,
        .is_idle = si_dma_is_idle,
        .wait_for_idle = si_dma_wait_for_idle,
        .soft_reset = si_dma_soft_reset,
        .set_clockgating_state = si_dma_set_clockgating_state,
        .set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
        .align_mask = 0xf,
        .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
        .support_64bit_ptrs = false,
        .get_rptr = si_dma_ring_get_rptr,
        .get_wptr = si_dma_ring_get_wptr,
        .set_wptr = si_dma_ring_set_wptr,
        .emit_frame_size =
                3 + 3 + /* hdp flush / invalidate */
                6 + /* si_dma_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
                9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
        .emit_ib = si_dma_ring_emit_ib,
        .emit_fence = si_dma_ring_emit_fence,
        .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
        .emit_vm_flush = si_dma_ring_emit_vm_flush,
        .test_ring = si_dma_ring_test_ring,
        .test_ib = si_dma_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = si_dma_ring_pad_ib,
        .emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
        .set = si_dma_set_trap_irq_state,
        .process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure (TMZ) copy (unused on SI)
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
                                    uint64_t src_offset,
                                    uint64_t dst_offset,
                                    uint32_t byte_count,
                                    bool tmz)
{
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, byte_count);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to fill
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
                                    uint32_t src_data,
                                    uint64_t dst_offset,
                                    uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
                                              0, 0, 0, byte_count / 4);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = src_data;
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
        .copy_max_bytes = 0xffff8,
        .copy_num_dw = 5,
        .emit_copy_buffer = si_dma_emit_copy_buffer,

        .fill_max_bytes = 0xffff8,
        .fill_num_dw = 4,
        .emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
        adev->mman.buffer_funcs = &si_dma_buffer_funcs;
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
        .copy_pte_num_dw = 5,
        .copy_pte = si_dma_vm_copy_pte,

        .write_pte = si_dma_vm_write_pte,
        .set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        unsigned i;

        adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->vm_manager.vm_pte_scheds[i] =
                        &adev->sdma.instance[i].ring.sched;
        }
        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &si_dma_ip_funcs,
};