linux/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err, i;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
                chip_name = "kaveri";
                break;
        case CHIP_KABINI:
                chip_name = "kabini";
                break;
        case CHIP_MULLINS:
                chip_name = "mullins";
                break;
        default: BUG();
        }

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
        }
out:
        if (err) {
                printk(KERN_ERR
                       "cik_sdma: Failed to load firmware \"%s\"\n",
                       fw_name);
                for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                        release_firmware(adev->sdma[i].fw);
                        adev->sdma[i].fw = NULL;
                }
        }
        return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
        u32 rptr;

        rptr = ring->adev->wb.wb[ring->rptr_offs];

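        /* the write-back value is a byte offset into the ring; mask it and convert to a dword index */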
        return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->nop |
                                          SDMA_NOP_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->nop);
}

/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
{
        u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
        u32 next_rptr = ring->wptr + 5;

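        /* next_rptr will point just past the INDIRECT_BUFFER packet below, which must end on an 8-dword boundary */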
        while ((next_rptr & 7) != 4)
                next_rptr++;

        next_rptr += 4;
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
        amdgpu_ring_write(ring, next_rptr);

        /* IB packet must end on an 8 DW boundary */
        cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);

        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (ring == &ring->adev->sdma[0].ring)
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
        else
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

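        /* POLL_REG_MEM: request the flush via GPU_HDP_FLUSH_REQ and poll GPU_HDP_FLUSH_DONE for this engine's bit */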
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence value is written
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}

/**
 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
                                         struct amdgpu_semaphore *semaphore,
                                         bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
        amdgpu_ring_write(ring, addr & 0xfffffff8);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

        return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
        struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
        u32 rb_cntl;
        int i;

        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
        sdma0->ready = false;
        sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
        u32 me_cntl;
        int i;

        if (!enable) {
                cik_sdma_gfx_stop(adev);
                cik_sdma_rlc_stop(adev);
        }

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
                else
                        me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
        }
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        int i, j, r;

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                ring = &adev->sdma[i].ring;
                wb_offset = (ring->rptr_offs * 4);

                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        cik_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                        /* XXX SDMA RLC - todo */
                }
                cik_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
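                /* the RB_SIZE field (log2 of the ring size in dwords) starts at bit 1 of RB_CNTL */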
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
                        SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
                       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

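                /* ring buffer base address is programmed in units of 256 bytes */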
                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

                /* enable DMA RB */
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
                       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

                ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
                ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

                ring->ready = true;

                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
        }

        return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v1_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;

        if (!adev->sdma[0].fw || !adev->sdma[1].fw)
                return -EINVAL;

        /* halt the MEs */
        cik_sdma_enable(adev, false);

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                if (adev->sdma[i].feature_version >= 20)
                        adev->sdma[i].burst_nop = true;
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
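                /* reset the ucode load address, then stream the words through SDMA0_UCODE_DATA */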
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
        }

        return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
        int r;

        r = cik_sdma_load_microcode(adev);
        if (r)
                return r;

        /* unhalt the MEs */
        cik_sdma_enable(adev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}

/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

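        /* use a scratch write-back slot as the target and seed it with a marker value */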
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_lock(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
                return r;
        }
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_unlock_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        amdgpu_wb_free(adev, index);

        return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
        u64 gpu_addr;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(ring, NULL, 256, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                goto err0;
        }

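        /* build a one-dword linear WRITE packet: 0xDEADBEEF -> gpu_addr */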
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
                                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                                 &f);
        if (r)
                goto err1;

        r = fence_wait(f, false);
        if (r) {
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err1;
        }
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < adev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
                         ring->idx, i);
                goto err1;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }

err1:
        fence_put(f);
        amdgpu_ib_free(adev, &ib);
err0:
        amdgpu_wb_free(adev, index);
        return r;
}

/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
                                 uint64_t pe, uint64_t src,
                                 unsigned count)
{
        while (count) {
                unsigned bytes = count * 8;
                if (bytes > 0x1FFFF8)
                        bytes = 0x1FFFF8;

                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                ib->ptr[ib->length_dw++] = bytes;
                ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(src);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);

                pe += bytes;
                src += bytes;
                count -= bytes / 8;
        }
}

/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
                                  uint64_t pe,
                                  uint64_t addr, unsigned count,
                                  uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                ib->ptr[ib->length_dw++] = pe;
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
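                /* each PTE is written as two dwords (low then high) */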
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & AMDGPU_PTE_SYSTEM) {
                                value = amdgpu_vm_map_gart(ib->ring->adev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & AMDGPU_PTE_VALID) {
                                value = addr;
                        } else {
                                value = 0;
                        }
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                }
        }
}

/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
                                    uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count;
                if (ndw > 0x7FFFF)
                        ndw = 0x7FFFF;

                if (flags & AMDGPU_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = ndw; /* number of entries */

                pe += ndw * 8;
                addr += ndw * incr;
                count -= ndw;
        }
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
        struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;

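        /* pad with NOPs until the IB length is a multiple of 8 dwords */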
        pad_count = (8 - (ib->length_dw & 0x7)) % 8;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
                                        SDMA_NOP_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID of the VM to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vm_id, uint64_t pd_addr)
{
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

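        /* update the page directory base for this VMID with an SRBM register write */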
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm_id < 8) {
                amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
        } else {
                amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);

        /* flush TLB */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 1 << vm_id);

        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
                WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
                WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
        } else {
                orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
                data |= 0xff000000;
                if (data != orig)
                        WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

                orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
                data |= 0xff000000;
                if (data != orig)
                        WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
        }
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
                data |= 0x100;
                if (orig != data)
                        WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
                data |= 0x100;
                if (orig != data)
                        WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
        } else {
                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
                data &= ~0x100;
                if (orig != data)
                        WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
                data &= ~0x100;
                if (orig != data)
                        WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
        }
}

static int cik_sdma_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        cik_sdma_set_ring_funcs(adev);
        cik_sdma_set_irq_funcs(adev);
        cik_sdma_set_buffer_funcs(adev);
        cik_sdma_set_vm_pte_funcs(adev);

        return 0;
}

static int cik_sdma_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = cik_sdma_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load sdma firmware!\n");
                return r;
        }

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
        if (r)
                return r;

        ring = &adev->sdma[0].ring;
        ring->ring_obj = NULL;

        ring = &adev->sdma[1].ring;
        ring->ring_obj = NULL;

        ring = &adev->sdma[0].ring;
        sprintf(ring->name, "sdma0");
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
                             &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
                             AMDGPU_RING_TYPE_SDMA);
        if (r)
                return r;

        ring = &adev->sdma[1].ring;
        sprintf(ring->name, "sdma1");
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
                             &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
                             AMDGPU_RING_TYPE_SDMA);
        if (r)
                return r;

        return r;
}

static int cik_sdma_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_ring_fini(&adev->sdma[0].ring);
        amdgpu_ring_fini(&adev->sdma[1].ring);

        return 0;
}

static int cik_sdma_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = cik_sdma_start(adev);
        if (r)
                return r;

        return r;
}

static int cik_sdma_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        cik_sdma_enable(adev, false);

        return 0;
}

static int cik_sdma_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
                   SRBM_STATUS2__SDMA1_BUSY_MASK))
                return false;

        return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
                                SRBM_STATUS2__SDMA1_BUSY_MASK);

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static void cik_sdma_print_status(void *handle)
{
        int i, j;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "CIK SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        cik_srbm_select(adev, 0, 0, 0, j);
                        dev_info(adev->dev, "  VM %d:\n", j);
                        dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
                                 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
                        dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
                                 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
                }
                cik_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
        }
}

static int cik_sdma_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp |= SDMA0_F32_CNTL__HALT_MASK;
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
        }
        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp |= SDMA0_F32_CNTL__HALT_MASK;
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }

        if (srbm_soft_reset) {
                cik_sdma_print_status((void *)adev);

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);

                cik_sdma_print_status((void *)adev);
        }

        return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_TRAP0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_TRAP1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *source,
                                     struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

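        /* ring_id encodes the SDMA instance in bits [1:0] and the queue in bits [3:2] */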
        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;
        DRM_DEBUG("IH: SDMA trap\n");
        switch (instance_id) {
        case 0:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma[0].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        case 1:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma[1].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        }

        return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
        schedule_work(&adev->reset_work);
        return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        cik_enable_sdma_mgcg(adev, gate);
        cik_enable_sdma_mgls(adev, gate);

        return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
        .early_init = cik_sdma_early_init,
        .late_init = NULL,
        .sw_init = cik_sdma_sw_init,
        .sw_fini = cik_sdma_sw_fini,
        .hw_init = cik_sdma_hw_init,
        .hw_fini = cik_sdma_hw_fini,
        .suspend = cik_sdma_suspend,
        .resume = cik_sdma_resume,
        .is_idle = cik_sdma_is_idle,
        .wait_for_idle = cik_sdma_wait_for_idle,
        .soft_reset = cik_sdma_soft_reset,
        .print_status = cik_sdma_print_status,
        .set_clockgating_state = cik_sdma_set_clockgating_state,
        .set_powergating_state = cik_sdma_set_powergating_state,
};

/**
 * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
{
        if (cik_sdma_is_idle(ring->adev)) {
                amdgpu_ring_lockup_update(ring);
                return false;
        }
        return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
        .get_rptr = cik_sdma_ring_get_rptr,
        .get_wptr = cik_sdma_ring_get_wptr,
        .set_wptr = cik_sdma_ring_set_wptr,
        .parse_cs = NULL,
        .emit_ib = cik_sdma_ring_emit_ib,
        .emit_fence = cik_sdma_ring_emit_fence,
        .emit_semaphore = cik_sdma_ring_emit_semaphore,
        .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
        .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
        .test_ring = cik_sdma_ring_test_ring,
        .test_ib = cik_sdma_ring_test_ib,
        .is_lockup = cik_sdma_ring_is_lockup,
        .insert_nop = cik_sdma_ring_insert_nop,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
        adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
        .set = cik_sdma_set_trap_irq_state,
        .process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
        .process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
        adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}

/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
                                      uint64_t src_offset,
                                      uint64_t dst_offset,
                                      uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = byte_count;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
                                      uint32_t src_data,
                                      uint64_t dst_offset,
                                      uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = src_data;
        ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
        .copy_max_bytes = 0x1fffff,
        .copy_num_dw = 7,
        .emit_copy_buffer = cik_sdma_emit_copy_buffer,

        .fill_max_bytes = 0x1fffff,
        .fill_num_dw = 5,
        .emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
                adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
        }
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
        .copy_pte = cik_sdma_vm_copy_pte,
        .write_pte = cik_sdma_vm_write_pte,
        .set_pte_pde = cik_sdma_vm_set_pte_pde,
        .pad_ib = cik_sdma_vm_pad_ib,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
                adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
}