linux/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 * Authors: Alex Deucher
  23 */
  24#include <linux/firmware.h>
  25#include <drm/drmP.h>
  26#include "amdgpu.h"
  27#include "amdgpu_ucode.h"
  28#include "amdgpu_trace.h"
  29#include "cikd.h"
  30#include "cik.h"
  31
  32#include "bif/bif_4_1_d.h"
  33#include "bif/bif_4_1_sh_mask.h"
  34
  35#include "gca/gfx_7_2_d.h"
  36#include "gca/gfx_7_2_enum.h"
  37#include "gca/gfx_7_2_sh_mask.h"
  38
  39#include "gmc/gmc_7_1_d.h"
  40#include "gmc/gmc_7_1_sh_mask.h"
  41
  42#include "oss/oss_2_0_d.h"
  43#include "oss/oss_2_0_sh_mask.h"
  44
  45static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
  46{
  47        SDMA0_REGISTER_OFFSET,
  48        SDMA1_REGISTER_OFFSET
  49};
  50
  51static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
  52static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
  53static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
  54static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
  55
  56MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
  57MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
  58MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
  59MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
  60MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
  61MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
  62MODULE_FIRMWARE("radeon/kabini_sdma.bin");
  63MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
  64MODULE_FIRMWARE("radeon/mullins_sdma.bin");
  65MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
  66
  67u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
  68
  69/*
  70 * sDMA - System DMA
  71 * Starting with CIK, the GPU has new asynchronous
  72 * DMA engines.  These engines are used for compute
  73 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
  74 * and each one supports 1 ring buffer used for gfx
  75 * and 2 queues used for compute.
  76 *
  77 * The programming model is very similar to the CP
   78 * (ring buffer, IBs, etc.), but sDMA has its own
  79 * packet format that is different from the PM4 format
  80 * used by the CP. sDMA supports copying data, writing
  81 * embedded data, solid fills, and a number of other
  82 * things.  It also has support for tiling/detiling of
  83 * buffers.
  84 */
  85
  86/**
  87 * cik_sdma_init_microcode - load ucode images from disk
  88 *
  89 * @adev: amdgpu_device pointer
  90 *
  91 * Use the firmware interface to load the ucode images into
  92 * the driver (not loaded into hw).
  93 * Returns 0 on success, error on failure.
  94 */
  95static int cik_sdma_init_microcode(struct amdgpu_device *adev)
  96{
  97        const char *chip_name;
  98        char fw_name[30];
  99        int err = 0, i;
 100
 101        DRM_DEBUG("\n");
 102
 103        switch (adev->asic_type) {
 104        case CHIP_BONAIRE:
 105                chip_name = "bonaire";
 106                break;
 107        case CHIP_HAWAII:
 108                chip_name = "hawaii";
 109                break;
 110        case CHIP_KAVERI:
 111                chip_name = "kaveri";
 112                break;
 113        case CHIP_KABINI:
 114                chip_name = "kabini";
 115                break;
 116        case CHIP_MULLINS:
 117                chip_name = "mullins";
 118                break;
 119        default: BUG();
 120        }
 121
 122        for (i = 0; i < adev->sdma.num_instances; i++) {
 123                if (i == 0)
 124                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
 125                else
 126                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
 127                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 128                if (err)
 129                        goto out;
 130                err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 131        }
 132out:
 133        if (err) {
 134                printk(KERN_ERR
 135                       "cik_sdma: Failed to load firmware \"%s\"\n",
 136                       fw_name);
 137                for (i = 0; i < adev->sdma.num_instances; i++) {
 138                        release_firmware(adev->sdma.instance[i].fw);
 139                        adev->sdma.instance[i].fw = NULL;
 140                }
 141        }
 142        return err;
 143}
 144
 145/**
 146 * cik_sdma_ring_get_rptr - get the current read pointer
 147 *
 148 * @ring: amdgpu ring pointer
 149 *
 150 * Get the current rptr from the hardware (CIK+).
 151 */
 152static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 153{
 154        u32 rptr;
 155
 156        rptr = ring->adev->wb.wb[ring->rptr_offs];
 157
 158        return (rptr & 0x3fffc) >> 2;
 159}
 160
 161/**
 162 * cik_sdma_ring_get_wptr - get the current write pointer
 163 *
 164 * @ring: amdgpu ring pointer
 165 *
 166 * Get the current wptr from the hardware (CIK+).
 167 */
 168static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 169{
 170        struct amdgpu_device *adev = ring->adev;
 171        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 172
 173        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 174}
 175
 176/**
 177 * cik_sdma_ring_set_wptr - commit the write pointer
 178 *
 179 * @ring: amdgpu ring pointer
 180 *
 181 * Write the wptr back to the hardware (CIK+).
 182 */
 183static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 184{
 185        struct amdgpu_device *adev = ring->adev;
 186        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 187
 188        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 189}
 190
 191static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 192{
 193        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 194        int i;
 195
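     	/* a burst-capable engine takes a single NOP packet whose count field
     	 * says how many trailing NOP dwords follow; otherwise plain NOPs fill
     	 * the space */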
 196        for (i = 0; i < count; i++)
 197                if (sdma && sdma->burst_nop && (i == 0))
 198                        amdgpu_ring_write(ring, ring->nop |
 199                                          SDMA_NOP_COUNT(count - 1));
 200                else
 201                        amdgpu_ring_write(ring, ring->nop);
 202}
 203
 204/**
 205 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 206 *
 207 * @ring: amdgpu ring pointer
 208 * @ib: IB object to schedule
 209 *
 210 * Schedule an IB in the DMA ring (CIK).
 211 */
 212static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 213                           struct amdgpu_ib *ib)
 214{
 215        u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
 216        u32 next_rptr = ring->wptr + 5;
 217
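     	/* next_rptr is the ring position the rptr will reach once this
     	 * submission completes: the 5 DW write packet below, the NOP padding,
     	 * and the 4 DW INDIRECT_BUFFER packet, which must end on an 8 DW
     	 * boundary */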
 218        while ((next_rptr & 7) != 4)
 219                next_rptr++;
 220
 221        next_rptr += 4;
 222        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 223        amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
 224        amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
 225        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 226        amdgpu_ring_write(ring, next_rptr);
 227
 228        /* IB packet must end on a 8 DW boundary */
 229        cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
 230
 231        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 232        amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 233        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
 234        amdgpu_ring_write(ring, ib->length_dw);
 235
 236}
 237
 238/**
 239 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 240 *
 241 * @ring: amdgpu ring pointer
 242 *
 243 * Emit an hdp flush packet on the requested DMA ring.
 244 */
 245static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 246{
 247        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
 248                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 249        u32 ref_and_mask;
 250
 251        if (ring == &ring->adev->sdma.instance[0].ring)
 252                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 253        else
 254                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
 255
 256        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 257        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
 258        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
 259        amdgpu_ring_write(ring, ref_and_mask); /* reference */
 260        amdgpu_ring_write(ring, ref_and_mask); /* mask */
 261        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 262}
 263
 264/**
 265 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 266 *
 267 * @ring: amdgpu ring pointer
  268 * @addr: GPU address where the fence sequence number is written
      * @seq: sequence number to write
      * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 269 *
 270 * Add a DMA fence packet to the ring to write
 271 * the fence seq number and DMA trap packet to generate
 272 * an interrupt if needed (CIK).
 273 */
 274static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 275                                     unsigned flags)
 276{
 277        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 278        /* write the fence */
 279        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 280        amdgpu_ring_write(ring, lower_32_bits(addr));
 281        amdgpu_ring_write(ring, upper_32_bits(addr));
 282        amdgpu_ring_write(ring, lower_32_bits(seq));
 283
 284        /* optionally write high bits as well */
 285        if (write64bit) {
 286                addr += 4;
 287                amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
 288                amdgpu_ring_write(ring, lower_32_bits(addr));
 289                amdgpu_ring_write(ring, upper_32_bits(addr));
 290                amdgpu_ring_write(ring, upper_32_bits(seq));
 291        }
 292
 293        /* generate an interrupt */
 294        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
 295}
 296
 297/**
 298 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 299 *
 300 * @ring: amdgpu_ring structure holding ring information
 301 * @semaphore: amdgpu semaphore object
 302 * @emit_wait: wait or signal semaphore
 303 *
  304 * Add a DMA semaphore packet to the ring to wait on or signal
 305 * other rings (CIK).
 306 */
 307static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
 308                                         struct amdgpu_semaphore *semaphore,
 309                                         bool emit_wait)
 310{
 311        u64 addr = semaphore->gpu_addr;
 312        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
 313
 314        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 315        amdgpu_ring_write(ring, addr & 0xfffffff8);
 316        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 317
 318        return true;
 319}
 320
 321/**
 322 * cik_sdma_gfx_stop - stop the gfx async dma engines
 323 *
 324 * @adev: amdgpu_device pointer
 325 *
 326 * Stop the gfx async dma ring buffers (CIK).
 327 */
 328static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 329{
 330        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
 331        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 332        u32 rb_cntl;
 333        int i;
 334
 335        if ((adev->mman.buffer_funcs_ring == sdma0) ||
 336            (adev->mman.buffer_funcs_ring == sdma1))
 337                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 338
 339        for (i = 0; i < adev->sdma.num_instances; i++) {
 340                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 341                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
 342                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 343                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
 344        }
 345        sdma0->ready = false;
 346        sdma1->ready = false;
 347}
 348
 349/**
 350 * cik_sdma_rlc_stop - stop the compute async dma engines
 351 *
 352 * @adev: amdgpu_device pointer
 353 *
 354 * Stop the compute async dma queues (CIK).
 355 */
 356static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
 357{
 358        /* XXX todo */
 359}
 360
 361/**
  362 * cik_sdma_enable - halt or unhalt the async dma engines
 363 *
 364 * @adev: amdgpu_device pointer
 365 * @enable: enable/disable the DMA MEs.
 366 *
 367 * Halt or unhalt the async dma engines (CIK).
 368 */
 369static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 370{
 371        u32 me_cntl;
 372        int i;
 373
 374        if (enable == false) {
 375                cik_sdma_gfx_stop(adev);
 376                cik_sdma_rlc_stop(adev);
 377        }
 378
 379        for (i = 0; i < adev->sdma.num_instances; i++) {
 380                me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 381                if (enable)
 382                        me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
 383                else
 384                        me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
 385                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
 386        }
 387}
 388
 389/**
 390 * cik_sdma_gfx_resume - setup and start the async dma engines
 391 *
 392 * @adev: amdgpu_device pointer
 393 *
 394 * Set up the gfx DMA ring buffers and enable them (CIK).
 395 * Returns 0 for success, error for failure.
 396 */
 397static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 398{
 399        struct amdgpu_ring *ring;
 400        u32 rb_cntl, ib_cntl;
 401        u32 rb_bufsz;
 402        u32 wb_offset;
 403        int i, j, r;
 404
 405        for (i = 0; i < adev->sdma.num_instances; i++) {
 406                ring = &adev->sdma.instance[i].ring;
 407                wb_offset = (ring->rptr_offs * 4);
 408
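     		/* clear the per-VMID SDMA state: cik_srbm_select switches the
     		 * register aperture to each of the 16 VMIDs in turn */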
 409                mutex_lock(&adev->srbm_mutex);
 410                for (j = 0; j < 16; j++) {
 411                        cik_srbm_select(adev, 0, 0, 0, j);
 412                        /* SDMA GFX */
 413                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
 414                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
 415                        /* XXX SDMA RLC - todo */
 416                }
 417                cik_srbm_select(adev, 0, 0, 0, 0);
 418                mutex_unlock(&adev->srbm_mutex);
 419
 420                WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
 421                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
 422
 423                /* Set ring buffer size in dwords */
 424                rb_bufsz = order_base_2(ring->ring_size / 4);
 425                rb_cntl = rb_bufsz << 1;
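     		/* log2 ring size in dwords goes in the RB_SIZE field, which
     		 * starts one bit above RB_ENABLE */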
 426#ifdef __BIG_ENDIAN
 427                rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
 428                        SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
 429#endif
 430                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 431
 432                /* Initialize the ring buffer's read and write pointers */
 433                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
 434                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
 435
 436                /* set the wb address whether it's enabled or not */
 437                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
 438                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
 439                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
 440                       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
 441
 442                rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
 443
 444                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
 445                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
 446
 447                ring->wptr = 0;
 448                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
 449
 450                /* enable DMA RB */
 451                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
 452                       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
 453
 454                ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
 455#ifdef __BIG_ENDIAN
 456                ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
 457#endif
 458                /* enable DMA IBs */
 459                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 460
 461                ring->ready = true;
 462
 463                r = amdgpu_ring_test_ring(ring);
 464                if (r) {
 465                        ring->ready = false;
 466                        return r;
 467                }
 468
 469                if (adev->mman.buffer_funcs_ring == ring)
 470                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
 471        }
 472
 473        return 0;
 474}
 475
 476/**
 477 * cik_sdma_rlc_resume - setup and start the async dma engines
 478 *
 479 * @adev: amdgpu_device pointer
 480 *
 481 * Set up the compute DMA queues and enable them (CIK).
 482 * Returns 0 for success, error for failure.
 483 */
 484static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
 485{
 486        /* XXX todo */
 487        return 0;
 488}
 489
 490/**
 491 * cik_sdma_load_microcode - load the sDMA ME ucode
 492 *
 493 * @adev: amdgpu_device pointer
 494 *
 495 * Loads the sDMA0/1 ucode.
 496 * Returns 0 for success, -EINVAL if the ucode is not available.
 497 */
 498static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 499{
 500        const struct sdma_firmware_header_v1_0 *hdr;
 501        const __le32 *fw_data;
 502        u32 fw_size;
 503        int i, j;
 504
 505        /* halt the MEs */
 506        cik_sdma_enable(adev, false);
 507
 508        for (i = 0; i < adev->sdma.num_instances; i++) {
 509                if (!adev->sdma.instance[i].fw)
 510                        return -EINVAL;
 511                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 512                amdgpu_ucode_print_sdma_hdr(&hdr->header);
 513                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 514                adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 515                adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 516                if (adev->sdma.instance[i].feature_version >= 20)
 517                        adev->sdma.instance[i].burst_nop = true;
 518                fw_data = (const __le32 *)
 519                        (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
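     		/* write the ucode starting at offset 0; UCODE_ADDR auto-increments
     		 * as each dword is written through UCODE_DATA */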
 520                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 521                for (j = 0; j < fw_size; j++)
 522                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
 523                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 524        }
 525
 526        return 0;
 527}
 528
 529/**
 530 * cik_sdma_start - setup and start the async dma engines
 531 *
 532 * @adev: amdgpu_device pointer
 533 *
 534 * Set up the DMA engines and enable them (CIK).
 535 * Returns 0 for success, error for failure.
 536 */
 537static int cik_sdma_start(struct amdgpu_device *adev)
 538{
 539        int r;
 540
 541        r = cik_sdma_load_microcode(adev);
 542        if (r)
 543                return r;
 544
 545        /* unhalt the MEs */
 546        cik_sdma_enable(adev, true);
 547
 548        /* start the gfx rings and rlc compute queues */
 549        r = cik_sdma_gfx_resume(adev);
 550        if (r)
 551                return r;
 552        r = cik_sdma_rlc_resume(adev);
 553        if (r)
 554                return r;
 555
 556        return 0;
 557}
 558
 559/**
 560 * cik_sdma_ring_test_ring - simple async dma engine test
 561 *
 562 * @ring: amdgpu_ring structure holding ring information
 563 *
  564 * Test the DMA engine by using it to write a known value
  565 * to memory (CIK).
 566 * Returns 0 for success, error for failure.
 567 */
 568static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
 569{
 570        struct amdgpu_device *adev = ring->adev;
 571        unsigned i;
 572        unsigned index;
 573        int r;
 574        u32 tmp;
 575        u64 gpu_addr;
 576
 577        r = amdgpu_wb_get(adev, &index);
 578        if (r) {
 579                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 580                return r;
 581        }
 582
 583        gpu_addr = adev->wb.gpu_addr + (index * 4);
 584        tmp = 0xCAFEDEAD;
 585        adev->wb.wb[index] = cpu_to_le32(tmp);
 586
 587        r = amdgpu_ring_lock(ring, 5);
 588        if (r) {
 589                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
 590                amdgpu_wb_free(adev, index);
 591                return r;
 592        }
 593        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
 594        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
 595        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
 596        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
 597        amdgpu_ring_write(ring, 0xDEADBEEF);
 598        amdgpu_ring_unlock_commit(ring);
 599
 600        for (i = 0; i < adev->usec_timeout; i++) {
 601                tmp = le32_to_cpu(adev->wb.wb[index]);
 602                if (tmp == 0xDEADBEEF)
 603                        break;
 604                DRM_UDELAY(1);
 605        }
 606
 607        if (i < adev->usec_timeout) {
 608                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
 609        } else {
 610                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
 611                          ring->idx, tmp);
 612                r = -EINVAL;
 613        }
 614        amdgpu_wb_free(adev, index);
 615
 616        return r;
 617}
 618
 619/**
 620 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 621 *
 622 * @ring: amdgpu_ring structure holding ring information
 623 *
 624 * Test a simple IB in the DMA ring (CIK).
 625 * Returns 0 on success, error on failure.
 626 */
 627static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 628{
 629        struct amdgpu_device *adev = ring->adev;
 630        struct amdgpu_ib ib;
 631        struct fence *f = NULL;
 632        unsigned i;
 633        unsigned index;
 634        int r;
 635        u32 tmp = 0;
 636        u64 gpu_addr;
 637
 638        r = amdgpu_wb_get(adev, &index);
 639        if (r) {
 640                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
 641                return r;
 642        }
 643
 644        gpu_addr = adev->wb.gpu_addr + (index * 4);
 645        tmp = 0xCAFEDEAD;
 646        adev->wb.wb[index] = cpu_to_le32(tmp);
 647        memset(&ib, 0, sizeof(ib));
 648        r = amdgpu_ib_get(ring, NULL, 256, &ib);
 649        if (r) {
 650                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 651                goto err0;
 652        }
 653
 654        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 655        ib.ptr[1] = lower_32_bits(gpu_addr);
 656        ib.ptr[2] = upper_32_bits(gpu_addr);
 657        ib.ptr[3] = 1;
 658        ib.ptr[4] = 0xDEADBEEF;
 659        ib.length_dw = 5;
 660        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
 661                                                 AMDGPU_FENCE_OWNER_UNDEFINED,
 662                                                 &f);
 663        if (r)
 664                goto err1;
 665
 666        r = fence_wait(f, false);
 667        if (r) {
 668                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 669                goto err1;
 670        }
 671        for (i = 0; i < adev->usec_timeout; i++) {
 672                tmp = le32_to_cpu(adev->wb.wb[index]);
 673                if (tmp == 0xDEADBEEF)
 674                        break;
 675                DRM_UDELAY(1);
 676        }
 677        if (i < adev->usec_timeout) {
 678                DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
 679                         ring->idx, i);
 680                goto err1;
 681        } else {
 682                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 683                r = -EINVAL;
 684        }
 685
 686err1:
 687        fence_put(f);
 688        amdgpu_ib_free(adev, &ib);
 689err0:
 690        amdgpu_wb_free(adev, index);
 691        return r;
 692}
 693
 694/**
  695 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 696 *
 697 * @ib: indirect buffer to fill with commands
 698 * @pe: addr of the page entry
 699 * @src: src addr to copy from
 700 * @count: number of page entries to update
 701 *
 702 * Update PTEs by copying them from the GART using sDMA (CIK).
 703 */
 704static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 705                                 uint64_t pe, uint64_t src,
 706                                 unsigned count)
 707{
 708        while (count) {
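     		/* each PTE is 8 bytes; one COPY packet moves at most 0x1FFFF8
     		 * bytes (the 0x1fffff copy limit rounded down to whole PTEs) */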
 709                unsigned bytes = count * 8;
 710                if (bytes > 0x1FFFF8)
 711                        bytes = 0x1FFFF8;
 712
 713                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
 714                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 715                ib->ptr[ib->length_dw++] = bytes;
 716                ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 717                ib->ptr[ib->length_dw++] = lower_32_bits(src);
 718                ib->ptr[ib->length_dw++] = upper_32_bits(src);
 719                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
 720                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 721
 722                pe += bytes;
 723                src += bytes;
 724                count -= bytes / 8;
 725        }
 726}
 727
 728/**
  729 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 730 *
 731 * @ib: indirect buffer to fill with commands
 732 * @pe: addr of the page entry
 733 * @addr: dst addr to write into pe
 734 * @count: number of page entries to update
 735 * @incr: increase next addr by incr bytes
 736 * @flags: access flags
 737 *
 738 * Update PTEs by writing them manually using sDMA (CIK).
 739 */
 740static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
 741                                  uint64_t pe,
 742                                  uint64_t addr, unsigned count,
 743                                  uint32_t incr, uint32_t flags)
 744{
 745        uint64_t value;
 746        unsigned ndw;
 747
 748        while (count) {
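     		/* each PTE is written as two dwords, so one WRITE packet holds
     		 * at most 0xFFFFE payload dwords */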
 749                ndw = count * 2;
 750                if (ndw > 0xFFFFE)
 751                        ndw = 0xFFFFE;
 752
 753                /* for non-physically contiguous pages (system) */
 754                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
 755                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 756                ib->ptr[ib->length_dw++] = pe;
 757                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 758                ib->ptr[ib->length_dw++] = ndw;
 759                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 760                        if (flags & AMDGPU_PTE_SYSTEM) {
 761                                value = amdgpu_vm_map_gart(ib->ring->adev, addr);
 762                                value &= 0xFFFFFFFFFFFFF000ULL;
 763                        } else if (flags & AMDGPU_PTE_VALID) {
 764                                value = addr;
 765                        } else {
 766                                value = 0;
 767                        }
 768                        addr += incr;
 769                        value |= flags;
 770                        ib->ptr[ib->length_dw++] = value;
 771                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
 772                }
 773        }
 774}
 775
 776/**
  777 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 778 *
 779 * @ib: indirect buffer to fill with commands
 780 * @pe: addr of the page entry
 781 * @addr: dst addr to write into pe
 782 * @count: number of page entries to update
 783 * @incr: increase next addr by incr bytes
 784 * @flags: access flags
 785 *
 786 * Update the page tables using sDMA (CIK).
 787 */
 788static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
 789                                    uint64_t pe,
 790                                    uint64_t addr, unsigned count,
 791                                    uint32_t incr, uint32_t flags)
 792{
 793        uint64_t value;
 794        unsigned ndw;
 795
 796        while (count) {
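     		/* one GENERATE_PTE_PDE packet can produce up to 0x7FFFF entries */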
 797                ndw = count;
 798                if (ndw > 0x7FFFF)
 799                        ndw = 0x7FFFF;
 800
 801                if (flags & AMDGPU_PTE_VALID)
 802                        value = addr;
 803                else
 804                        value = 0;
 805
 806                /* for physically contiguous pages (vram) */
 807                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 808                ib->ptr[ib->length_dw++] = pe; /* dst addr */
 809                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 810                ib->ptr[ib->length_dw++] = flags; /* mask */
 811                ib->ptr[ib->length_dw++] = 0;
 812                ib->ptr[ib->length_dw++] = value; /* value */
 813                ib->ptr[ib->length_dw++] = upper_32_bits(value);
 814                ib->ptr[ib->length_dw++] = incr; /* increment size */
 815                ib->ptr[ib->length_dw++] = 0;
 816                ib->ptr[ib->length_dw++] = ndw; /* number of entries */
 817
 818                pe += ndw * 8;
 819                addr += ndw * incr;
 820                count -= ndw;
 821        }
 822}
 823
 824/**
 825 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 826 *
 827 * @ib: indirect buffer to fill with padding
 828 *
 829 */
 830static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 831{
 832        struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 833        u32 pad_count;
 834        int i;
 835
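     	/* the IB must end on an 8 DW boundary; with burst NOP support the
     	 * first padding packet encodes the count of the trailing NOPs */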
 836        pad_count = (8 - (ib->length_dw & 0x7)) % 8;
 837        for (i = 0; i < pad_count; i++)
 838                if (sdma && sdma->burst_nop && (i == 0))
 839                        ib->ptr[ib->length_dw++] =
 840                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
 841                                        SDMA_NOP_COUNT(pad_count - 1);
 842                else
 843                        ib->ptr[ib->length_dw++] =
 844                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 845}
 846
 847/**
 848 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 849 *
 850 * @ring: amdgpu_ring pointer
  851 * @vm_id: VMID to flush
      * @pd_addr: new page directory base address
 852 *
 853 * Update the page table base and flush the VM TLB
 854 * using sDMA (CIK).
 855 */
 856static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 857                                        unsigned vm_id, uint64_t pd_addr)
 858{
 859        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 860                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 861
 862        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 863        if (vm_id < 8) {
 864                amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
 865        } else {
 866                amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
 867        }
 868        amdgpu_ring_write(ring, pd_addr >> 12);
 869
 870        /* flush TLB */
 871        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 872        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 873        amdgpu_ring_write(ring, 1 << vm_id);
 874
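     	/* read VM_INVALIDATE_REQUEST back with the "always" compare function
     	 * so later packets are ordered after the invalidate write above */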
 875        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 876        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 877        amdgpu_ring_write(ring, 0);
 878        amdgpu_ring_write(ring, 0); /* reference */
 879        amdgpu_ring_write(ring, 0); /* mask */
 880        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 881}
 882
 883static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 884                                 bool enable)
 885{
 886        u32 orig, data;
 887
 888        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
 889                WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
 890                WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
 891        } else {
 892                orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
 893                data |= 0xff000000;
 894                if (data != orig)
 895                        WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
 896
 897                orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
 898                data |= 0xff000000;
 899                if (data != orig)
 900                        WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
 901        }
 902}
 903
 904static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
 905                                 bool enable)
 906{
 907        u32 orig, data;
 908
 909        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
 910                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 911                data |= 0x100;
 912                if (orig != data)
 913                        WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 914
 915                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 916                data |= 0x100;
 917                if (orig != data)
 918                        WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 919        } else {
 920                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
 921                data &= ~0x100;
 922                if (orig != data)
 923                        WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
 924
 925                orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
 926                data &= ~0x100;
 927                if (orig != data)
 928                        WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
 929        }
 930}
 931
 932static int cik_sdma_early_init(void *handle)
 933{
 934        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 935
 936        adev->sdma.num_instances = SDMA_MAX_INSTANCE;
 937
 938        cik_sdma_set_ring_funcs(adev);
 939        cik_sdma_set_irq_funcs(adev);
 940        cik_sdma_set_buffer_funcs(adev);
 941        cik_sdma_set_vm_pte_funcs(adev);
 942
 943        return 0;
 944}
 945
 946static int cik_sdma_sw_init(void *handle)
 947{
 948        struct amdgpu_ring *ring;
 949        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 950        int r, i;
 951
 952        r = cik_sdma_init_microcode(adev);
 953        if (r) {
 954                DRM_ERROR("Failed to load sdma firmware!\n");
 955                return r;
 956        }
 957
 958        /* SDMA trap event */
 959        r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 960        if (r)
 961                return r;
 962
 963        /* SDMA Privileged inst */
 964        r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 965        if (r)
 966                return r;
 967
 968        /* SDMA Privileged inst */
 969        r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 970        if (r)
 971                return r;
 972
 973        for (i = 0; i < adev->sdma.num_instances; i++) {
 974                ring = &adev->sdma.instance[i].ring;
 975                ring->ring_obj = NULL;
 976                sprintf(ring->name, "sdma%d", i);
 977                r = amdgpu_ring_init(adev, ring, 256 * 1024,
 978                                     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
 979                                     &adev->sdma.trap_irq,
 980                                     (i == 0) ?
 981                                     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
 982                                     AMDGPU_RING_TYPE_SDMA);
 983                if (r)
 984                        return r;
 985        }
 986
 987        return r;
 988}
 989
 990static int cik_sdma_sw_fini(void *handle)
 991{
 992        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 993        int i;
 994
 995        for (i = 0; i < adev->sdma.num_instances; i++)
 996                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 997
 998        return 0;
 999}
1000
1001static int cik_sdma_hw_init(void *handle)
1002{
1003        int r;
1004        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1005
1006        r = cik_sdma_start(adev);
1007        if (r)
1008                return r;
1009
1010        return r;
1011}
1012
1013static int cik_sdma_hw_fini(void *handle)
1014{
1015        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1016
1017        cik_sdma_enable(adev, false);
1018
1019        return 0;
1020}
1021
1022static int cik_sdma_suspend(void *handle)
1023{
1024        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1025
1026        return cik_sdma_hw_fini(adev);
1027}
1028
1029static int cik_sdma_resume(void *handle)
1030{
1031        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032
1033        return cik_sdma_hw_init(adev);
1034}
1035
1036static bool cik_sdma_is_idle(void *handle)
1037{
1038        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1039        u32 tmp = RREG32(mmSRBM_STATUS2);
1040
1041        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
1042                                SRBM_STATUS2__SDMA1_BUSY_MASK))
1043            return false;
1044
1045        return true;
1046}
1047
1048static int cik_sdma_wait_for_idle(void *handle)
1049{
1050        unsigned i;
1051        u32 tmp;
1052        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1053
1054        for (i = 0; i < adev->usec_timeout; i++) {
1055                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
1056                                SRBM_STATUS2__SDMA1_BUSY_MASK);
1057
1058                if (!tmp)
1059                        return 0;
1060                udelay(1);
1061        }
1062        return -ETIMEDOUT;
1063}
1064
1065static void cik_sdma_print_status(void *handle)
1066{
1067        int i, j;
1068        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1069
1070        dev_info(adev->dev, "CIK SDMA registers\n");
1071        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
1072                 RREG32(mmSRBM_STATUS2));
1073        for (i = 0; i < adev->sdma.num_instances; i++) {
1074                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
1075                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
1076                dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
1077                         i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
1078                dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
1079                         i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
1080                dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
1081                         i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
1082                dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
1083                         i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
1084                dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
1085                         i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
1086                dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
1087                         i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
1088                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
1089                         i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
1090                dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
1091                         i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
1092                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
1093                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
1094                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
1095                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
1096                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
1097                         i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
1098                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
1099                         i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
1100                mutex_lock(&adev->srbm_mutex);
1101                for (j = 0; j < 16; j++) {
1102                        cik_srbm_select(adev, 0, 0, 0, j);
1103                        dev_info(adev->dev, "  VM %d:\n", j);
1104                        dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
1105                                 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
1106                        dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
1107                                 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
1108                }
1109                cik_srbm_select(adev, 0, 0, 0, 0);
1110                mutex_unlock(&adev->srbm_mutex);
1111        }
1112}
1113
1114static int cik_sdma_soft_reset(void *handle)
1115{
1116        u32 srbm_soft_reset = 0;
1117        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1118        u32 tmp = RREG32(mmSRBM_STATUS2);
1119
1120        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
1121                /* sdma0 */
1122                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
1123                tmp |= SDMA0_F32_CNTL__HALT_MASK;
1124                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
1125                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
1126        }
             /* tmp no longer holds SRBM_STATUS2 here, so re-read the register */
 1127        if (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__SDMA1_BUSY_MASK) {
1128                /* sdma1 */
1129                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
1130                tmp |= SDMA0_F32_CNTL__HALT_MASK;
1131                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
1132                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
1133        }
1134
1135        if (srbm_soft_reset) {
1136                cik_sdma_print_status((void *)adev);
1137
1138                tmp = RREG32(mmSRBM_SOFT_RESET);
1139                tmp |= srbm_soft_reset;
1140                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1141                WREG32(mmSRBM_SOFT_RESET, tmp);
1142                tmp = RREG32(mmSRBM_SOFT_RESET);
1143
1144                udelay(50);
1145
1146                tmp &= ~srbm_soft_reset;
1147                WREG32(mmSRBM_SOFT_RESET, tmp);
1148                tmp = RREG32(mmSRBM_SOFT_RESET);
1149
1150                /* Wait a little for things to settle down */
1151                udelay(50);
1152
1153                cik_sdma_print_status((void *)adev);
1154        }
1155
1156        return 0;
1157}
1158
1159static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
1160                                       struct amdgpu_irq_src *src,
1161                                       unsigned type,
1162                                       enum amdgpu_interrupt_state state)
1163{
1164        u32 sdma_cntl;
1165
1166        switch (type) {
1167        case AMDGPU_SDMA_IRQ_TRAP0:
1168                switch (state) {
1169                case AMDGPU_IRQ_STATE_DISABLE:
1170                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1171                        sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1172                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1173                        break;
1174                case AMDGPU_IRQ_STATE_ENABLE:
1175                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
1176                        sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1177                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
1178                        break;
1179                default:
1180                        break;
1181                }
1182                break;
1183        case AMDGPU_SDMA_IRQ_TRAP1:
1184                switch (state) {
1185                case AMDGPU_IRQ_STATE_DISABLE:
1186                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1187                        sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
1188                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1189                        break;
1190                case AMDGPU_IRQ_STATE_ENABLE:
1191                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
1192                        sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
1193                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
1194                        break;
1195                default:
1196                        break;
1197                }
1198                break;
1199        default:
1200                break;
1201        }
1202        return 0;
1203}
1204
1205static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
1206                                     struct amdgpu_irq_src *source,
1207                                     struct amdgpu_iv_entry *entry)
1208{
1209        u8 instance_id, queue_id;
1210
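     	/* ring_id packs the SDMA instance in bits 1:0 and the queue
     	 * (0 = gfx ring, 1/2 = compute queues) in bits 3:2 */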
1211        instance_id = (entry->ring_id & 0x3) >> 0;
1212        queue_id = (entry->ring_id & 0xc) >> 2;
1213        DRM_DEBUG("IH: SDMA trap\n");
1214        switch (instance_id) {
1215        case 0:
1216                switch (queue_id) {
1217                case 0:
1218                        amdgpu_fence_process(&adev->sdma.instance[0].ring);
1219                        break;
1220                case 1:
1221                        /* XXX compute */
1222                        break;
1223                case 2:
1224                        /* XXX compute */
1225                        break;
1226                }
1227                break;
1228        case 1:
1229                switch (queue_id) {
1230                case 0:
1231                        amdgpu_fence_process(&adev->sdma.instance[1].ring);
1232                        break;
1233                case 1:
1234                        /* XXX compute */
1235                        break;
1236                case 2:
1237                        /* XXX compute */
1238                        break;
1239                }
1240                break;
1241        }
1242
1243        return 0;
1244}
1245
1246static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
1247                                             struct amdgpu_irq_src *source,
1248                                             struct amdgpu_iv_entry *entry)
1249{
1250        DRM_ERROR("Illegal instruction in SDMA command stream\n");
1251        schedule_work(&adev->reset_work);
1252        return 0;
1253}
1254
1255static int cik_sdma_set_clockgating_state(void *handle,
1256                                          enum amd_clockgating_state state)
1257{
1258        bool gate = false;
1259        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1260
1261        if (state == AMD_CG_STATE_GATE)
1262                gate = true;
1263
1264        cik_enable_sdma_mgcg(adev, gate);
1265        cik_enable_sdma_mgls(adev, gate);
1266
1267        return 0;
1268}
1269
1270static int cik_sdma_set_powergating_state(void *handle,
1271                                          enum amd_powergating_state state)
1272{
1273        return 0;
1274}
1275
1276const struct amd_ip_funcs cik_sdma_ip_funcs = {
1277        .early_init = cik_sdma_early_init,
1278        .late_init = NULL,
1279        .sw_init = cik_sdma_sw_init,
1280        .sw_fini = cik_sdma_sw_fini,
1281        .hw_init = cik_sdma_hw_init,
1282        .hw_fini = cik_sdma_hw_fini,
1283        .suspend = cik_sdma_suspend,
1284        .resume = cik_sdma_resume,
1285        .is_idle = cik_sdma_is_idle,
1286        .wait_for_idle = cik_sdma_wait_for_idle,
1287        .soft_reset = cik_sdma_soft_reset,
1288        .print_status = cik_sdma_print_status,
1289        .set_clockgating_state = cik_sdma_set_clockgating_state,
1290        .set_powergating_state = cik_sdma_set_powergating_state,
1291};
1292
1293static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
1294        .get_rptr = cik_sdma_ring_get_rptr,
1295        .get_wptr = cik_sdma_ring_get_wptr,
1296        .set_wptr = cik_sdma_ring_set_wptr,
1297        .parse_cs = NULL,
1298        .emit_ib = cik_sdma_ring_emit_ib,
1299        .emit_fence = cik_sdma_ring_emit_fence,
1300        .emit_semaphore = cik_sdma_ring_emit_semaphore,
1301        .emit_vm_flush = cik_sdma_ring_emit_vm_flush,
1302        .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
1303        .test_ring = cik_sdma_ring_test_ring,
1304        .test_ib = cik_sdma_ring_test_ib,
1305        .insert_nop = cik_sdma_ring_insert_nop,
1306};
1307
1308static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
1309{
1310        int i;
1311
1312        for (i = 0; i < adev->sdma.num_instances; i++)
1313                adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
1314}
1315
1316static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
1317        .set = cik_sdma_set_trap_irq_state,
1318        .process = cik_sdma_process_trap_irq,
1319};
1320
1321static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
1322        .process = cik_sdma_process_illegal_inst_irq,
1323};
1324
1325static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
1326{
1327        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1328        adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
1329        adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
1330}
1331
1332/**
1333 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
1334 *
 1335 * @ib: indirect buffer to fill with commands
1336 * @src_offset: src GPU address
1337 * @dst_offset: dst GPU address
1338 * @byte_count: number of bytes to xfer
1339 *
1340 * Copy GPU buffers using the DMA engine (CIK).
1341 * Used by the amdgpu ttm implementation to move pages if
1342 * registered as the asic copy callback.
1343 */
1344static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
1345                                      uint64_t src_offset,
1346                                      uint64_t dst_offset,
1347                                      uint32_t byte_count)
1348{
1349        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
1350        ib->ptr[ib->length_dw++] = byte_count;
1351        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1352        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1353        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1354        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1355        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1356}
1357
1358/**
1359 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
1360 *
 1361 * @ib: indirect buffer to fill with commands
1362 * @src_data: value to write to buffer
1363 * @dst_offset: dst GPU address
1364 * @byte_count: number of bytes to xfer
1365 *
1366 * Fill GPU buffers using the DMA engine (CIK).
1367 */
1368static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
1369                                      uint32_t src_data,
1370                                      uint64_t dst_offset,
1371                                      uint32_t byte_count)
1372{
1373        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
1374        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1375        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1376        ib->ptr[ib->length_dw++] = src_data;
1377        ib->ptr[ib->length_dw++] = byte_count;
1378}
1379
1380static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
1381        .copy_max_bytes = 0x1fffff,
1382        .copy_num_dw = 7,
1383        .emit_copy_buffer = cik_sdma_emit_copy_buffer,
1384
1385        .fill_max_bytes = 0x1fffff,
1386        .fill_num_dw = 5,
1387        .emit_fill_buffer = cik_sdma_emit_fill_buffer,
1388};
1389
1390static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
1391{
1392        if (adev->mman.buffer_funcs == NULL) {
1393                adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
1394                adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1395        }
1396}
1397
1398static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
1399        .copy_pte = cik_sdma_vm_copy_pte,
1400        .write_pte = cik_sdma_vm_write_pte,
1401        .set_pte_pde = cik_sdma_vm_set_pte_pde,
1402        .pad_ib = cik_sdma_vm_pad_ib,
1403};
1404
1405static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
1406{
1407        if (adev->vm_manager.vm_pte_funcs == NULL) {
1408                adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
1409                adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
1410                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
1411        }
1412}
1413