linux/drivers/gpu/drm/radeon/cik_sdma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64

u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        u32 rptr, reg;

        if (rdev->wb.enabled) {
                rptr = rdev->wb.wb[ring->rptr_offs/4];
        } else {
                if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                        reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
                else
                        reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

                rptr = RREG32(reg);
        }

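        /* the rptr is a byte offset; mask to dword alignment and convert to a dword index */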
        return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
                           struct radeon_ring *ring)
{
        u32 reg;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
        else
                reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

        return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        u32 reg;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
        else
                reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

        WREG32(reg, (ring->wptr << 2) & 0x3fffc);
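        /* read back to flush the posted register write */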
        (void)RREG32(reg);
}

/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
                              struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;

        if (rdev->wb.enabled) {
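                /*
                 * Predict the rptr after this packet: 5 dwords for the
                 * WRITE packet itself, pad until the INDIRECT_BUFFER
                 * packet below will start at (wptr & 7) == 4, then 4
                 * dwords for that packet.
                 */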
                u32 next_rptr = ring->wptr + 5;
                while ((next_rptr & 7) != 4)
                        next_rptr++;
                next_rptr += 4;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
                radeon_ring_write(ring, 1); /* number of DWs to follow */
                radeon_ring_write(ring, next_rptr);
        }

        /* IB packet must end on an 8 DW boundary */
        while ((ring->wptr & 7) != 4)
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
        radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_hdp_flush_ring_emit - emit an HDP flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an HDP flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
                                         int ridx)
{
        struct radeon_ring *ring = &rdev->ring[ridx];
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (ridx == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

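        /*
         * Request an HDP flush via GPU_HDP_FLUSH_REQ and poll
         * GPU_HDP_FLUSH_DONE until this engine's bit matches the
         * reference, i.e. the flush has completed.
         */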
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* reference */
        radeon_ring_write(ring, ref_and_mask); /* mask */
        radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* write the fence */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, upper_32_bits(addr));
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
        /* flush HDP */
        cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

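        /* the S bit selects signal vs. wait; the semaphore address must be 8 byte aligned */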
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
        radeon_ring_write(ring, addr & 0xfffffff8);
        radeon_ring_write(ring, upper_32_bits(addr));

        return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
        u32 rb_cntl, reg_offset;
        int i;

        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
                rb_cntl &= ~SDMA_RB_ENABLE;
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
        }
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;

        /* FIXME: use something other than this big hammer, but after a few
         * days we could not find a good combination, so reset the SDMA
         * blocks as it seems we do not shut them down properly. This fixes
         * hibernation and does not affect suspend to RAM.
         */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
        (void)RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);
        (void)RREG32(SRBM_SOFT_RESET);
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
        /* XXX todo */
}

/**
 * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable preemption.
 *
 * Enable or disable automatic context switching (preemption)
 * for the async dma engines (CIK).
 */
static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
{
        uint32_t reg_offset, value;
        int i;

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                value = RREG32(SDMA0_CNTL + reg_offset);
                if (enable)
                        value |= AUTO_CTXSW_ENABLE;
                else
                        value &= ~AUTO_CTXSW_ENABLE;
                WREG32(SDMA0_CNTL + reg_offset, value);
        }
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
        u32 me_cntl, reg_offset;
        int i;

        if (!enable) {
                cik_sdma_gfx_stop(rdev);
                cik_sdma_rlc_stop(rdev);
        }

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
                if (enable)
                        me_cntl &= ~SDMA_HALT;
                else
                        me_cntl |= SDMA_HALT;
                WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
        }

        cik_sdma_ctx_switch_enable(rdev, enable);
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = SDMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = SDMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
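                /* RB_CNTL takes the log2 of the ring size in dwords in its size field at bit 1 */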
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

                WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
                WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

                /* enable DMA RB */
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

                ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
                ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
                /* enable DMA IBs */
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
        /* XXX todo */
        return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
        int i;

        if (!rdev->sdma_fw)
                return -EINVAL;

        /* halt the MEs */
        cik_sdma_enable(rdev, false);

        if (rdev->new_fw) {
                const struct sdma_firmware_header_v1_0 *hdr =
                        (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
                const __le32 *fw_data;
                u32 fw_size;

                radeon_ucode_print_sdma_hdr(&hdr->header);

                /* sdma0 */
                fw_data = (const __le32 *)
                        (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
                WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

                /* sdma1 */
                fw_data = (const __le32 *)
                        (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
                for (i = 0; i < fw_size; i++)
                        WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
                WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
        } else {
                const __be32 *fw_data;

                /* sdma0 */
                fw_data = (const __be32 *)rdev->sdma_fw->data;
                WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
                for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                        WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
                WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

                /* sdma1 */
                fw_data = (const __be32 *)rdev->sdma_fw->data;
                WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
                for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                        WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
                WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
        }

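        /* UCODE_ADDR auto-increments on each UCODE_DATA write above; reset it so the engines fetch from offset 0 */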
        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        return 0;
}

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
        int r;

        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;

        /* unhalt the MEs */
        cik_sdma_enable(rdev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(rdev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(rdev);
        if (r)
                return r;

        return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
        /* halt the MEs */
        cik_sdma_enable(rdev, false);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
        /* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
                                  uint64_t src_offset, uint64_t dst_offset,
                                  unsigned num_gpu_pages,
                                  struct reservation_object *resv)
{
        struct radeon_fence *fence;
        struct radeon_sync sync;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        radeon_sync_create(&sync);

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
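        /* each copy packet is 7 dwords; the extra 14 dwords leave room for sync, fence, and padding */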
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_sync_resv(rdev, &sync, resv, false);
        radeon_sync_rings(rdev, &sync, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0x1fffff)
                        cur_size_in_bytes = 0x1fffff;
                size_in_bytes -= cur_size_in_bytes;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, cur_size_in_bytes);
                radeon_ring_write(ring, 0); /* src/dst endian swap */
                radeon_ring_write(ring, lower_32_bits(src_offset));
                radeon_ring_write(ring, upper_32_bits(src_offset));
                radeon_ring_write(ring, lower_32_bits(dst_offset));
                radeon_ring_write(ring, upper_32_bits(dst_offset));
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, &fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_sync_free(rdev, &sync, NULL);
                return ERR_PTR(r);
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_sync_free(rdev, &sync, fence);

        return fence;
}

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i;
        int r;
        unsigned index;
        u32 tmp;
        u64 gpu_addr;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                index = R600_WB_DMA_RING_TEST_OFFSET;
        else
                index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

        gpu_addr = rdev->wb.gpu_addr + index;

        tmp = 0xCAFEDEAD;
        rdev->wb.wb[index/4] = cpu_to_le32(tmp);

        r = radeon_ring_lock(rdev, ring, 5);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
        }
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        radeon_ring_write(ring, lower_32_bits(gpu_addr));
        radeon_ring_write(ring, upper_32_bits(gpu_addr));
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring, false);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
        u64 gpu_addr;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                index = R600_WB_DMA_RING_TEST_OFFSET;
        else
                index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

        gpu_addr = rdev->wb.gpu_addr + index;

        tmp = 0xCAFEDEAD;
        rdev->wb.wb[index/4] = cpu_to_le32(tmp);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
        }

        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                return r;
        }
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                /* don't leak the IB on this error path */
                radeon_ib_free(rdev, &ib);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = le32_to_cpu(rdev->wb.wb[index/4]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
        radeon_ib_free(rdev, &ib);
        return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cik_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
                            struct radeon_ib *ib,
                            uint64_t pe, uint64_t src,
                            unsigned count)
{
        while (count) {
                unsigned bytes = count * 8;
                if (bytes > 0x1FFFF8)
                        bytes = 0x1FFFF8;

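                /* the copy packet's byte count maxes out at 0x1FFFFF; 0x1FFFF8 is that rounded down to whole 8 byte PTEs */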
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                ib->ptr[ib->length_dw++] = bytes;
                ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(src);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);

                pe += bytes;
                src += bytes;
                count -= bytes / 8;
        }
}

/**
 * cik_sdma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
                             struct radeon_ib *ib,
                             uint64_t pe,
                             uint64_t addr, unsigned count,
                             uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;
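                /* each PTE takes 2 dwords, and the write packet's dword count caps the payload at 0xFFFFE */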

                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                        SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
                                value = 0;
                        }
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                }
        }
}

/**
 * cik_sdma_vm_set_pages - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
                           struct radeon_ib *ib,
                           uint64_t pe,
                           uint64_t addr, unsigned count,
                           uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count;
                if (ndw > 0x7FFFF)
                        ndw = 0x7FFFF;

                if (flags & R600_PTE_VALID)
                        value = addr;
                else
                        value = 0;

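                /*
                 * GENERATE_PTE_PDE has the engine build the entries itself:
                 * it writes ndw entries starting at pe, presumably adding
                 * incr to the value and applying the flags mask for each
                 * successive entry.
                 */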
                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = ndw; /* number of entries */

                pe += ndw * 8;
                addr += ndw * incr;
                count -= ndw;
        }
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (CIK).
 */
void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
{
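        /* same 8 dword alignment the ring code enforces in cik_sdma_ring_ib_execute() */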
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: VMID to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
                      unsigned vm_id, uint64_t pd_addr)
{
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

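        /* SRBM_WRITE takes a register dword offset; the 0xf000 extra bits appear to be the byte-enable mask (all bytes) */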
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm_id < 8) {
                radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
        } else {
                radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, pd_addr >> 12);

        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(vm_id));

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
        radeon_ring_write(ring, 1);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(0));

        /* flush HDP */
        cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);

        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 1 << vm_id);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0); /* reference */
        radeon_ring_write(ring, 0); /* mask */
        radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}