linux/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

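/* Append a single 32-bit word to the command buffer and advance user_size. */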
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

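/*
 * Emit a LOAD_STATE command that writes 'value' into the state register at
 * byte offset 'reg' (the offset field is in 32-bit register units, hence the
 * shift).  Commands are aligned to 64 bits as the FE fetches 64-bit words.
 */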
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

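/* Emit an END command, which stops the front end fetching from this buffer. */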
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

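/* Emit a WAIT command; the FE idles for 200 cycles before continuing. */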
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

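/*
 * Emit a LINK command redirecting the FE to 'address', prefetching
 * 'prefetch' 64-bit words from there.
 */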
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

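/*
 * Emit a STALL command: the 'from' unit (usually the FE) waits until the
 * 'to' unit signals the semaphore queued with CMD_SEM.
 */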
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

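/* Queue a semaphore token from 'from' to 'to'; paired with CMD_STALL. */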
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

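/*
 * Flush the caches of the currently active pipe, wait for the PE to become
 * idle and then switch the FE to the requested pipe.
 */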
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

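/* Dump 'len' 32-bit words of a command buffer, starting at byte offset 'off'. */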
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf,
                        &gpu->mmu_context->cmdbuf_mapping) +
                        off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer,
                                     &gpu->mmu_context->cmdbuf_mapping) +
               buffer->user_size;
}

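/*
 * Initialize the kernel ring buffer with a WAIT/LINK loop that the FE spins
 * on until the first submission redirects it.  Returns the prefetch size in
 * 64-bit words to start the FE with.
 */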
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        return buffer->user_size / 8;
}

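/*
 * Build a small self-contained command stream that programs the MMUv2
 * master TLB address and safe address on each available pipe, then ends.
 * Returns the buffer length in 64-bit prefetch words.
 */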
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

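/*
 * Build a small self-contained command stream that selects the page table
 * array (PTA) entry to use, then ends.  Returns the buffer length in 64-bit
 * prefetch words.
 */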
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

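/*
 * Park the GPU after the last submission: append the required cache flushes
 * (plus a BLT engine flush when present) followed by an END, then point the
 * previous WAIT/LINK at it.  If no flush is needed, the WAIT is simply
 * replaced with an END.
 */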
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;
        bool has_blt = !!(gpu->identity.minor_features5 &
                          chipMinorFeatures5_BLT_ENGINE);

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                if (has_blt)
                        dwords += 10;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                }
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D) {
                        if (has_blt) {
                                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                                CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
                                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                               VIVS_TS_FLUSH_CACHE_FLUSH);
                        }
                }
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                }
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

 308        /*
 309         * We need at most 3 dwords in the return target:
 310         * 1 event + 1 end + 1 wait + 1 link.
 311         */
 312        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        struct etnaviv_iommu_context *mmu_context, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;
        bool switch_mmu_context = gpu->mmu_context != mmu_context;
        unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
        bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
        bool has_blt = !!(gpu->identity.minor_features5 &
                          chipMinorFeatures5_BLT_ENGINE);

        lockdep_assert_held(&gpu->lock);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                            &gpu->mmu_context->cmdbuf_mapping);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append an MMU flush load state, followed by a new
         * link to this buffer; the number of extra dwords is worked out
         * below.
         */
        if (need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (need_flush) {
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                /* PTA load command */
                if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
                        extra_dwords += 1;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
                /*
                 * Switch MMU context if necessary. Must be done after the
                 * link target has been calculated, as the jump forward in the
                 * kernel ring still uses the last active MMU context before
                 * the switch.
                 */
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;

                        etnaviv_iommu_context_get(mmu_context);
                        gpu->mmu_context = mmu_context;
                        etnaviv_iommu_context_put(old_context);
                }

                if (need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                            VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

                                if (switch_mmu_context &&
                                    gpu->sec_mode == ETNA_SEC_KERNEL) {
                                        unsigned short id =
                                                etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
                                        CMD_LOAD_STATE(buffer,
                                                VIVS_MMUv2_PTA_CONFIG,
                                                VIVS_MMUv2_PTA_CONFIG_INDEX(id));
                                }

                                if (gpu->sec_mode == ETNA_SEC_NONE)
                                        flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                               flush);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                        }

                        gpu->flush_seq = new_flush_seq;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                        &gpu->mmu_context->cmdbuf_mapping);
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;

        /*
         * When the BLT engine is present we need 6 more dwords in the return
         * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
         * but we don't need the normal TS flush state.
         */
        if (has_blt)
                return_dwords += 6;

        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_DEPTH |
                                       VIVS_GL_FLUSH_CACHE_COLOR);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                } else {
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                               VIVS_TS_FLUSH_CACHE_FLUSH);
                }
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        if (has_blt) {
                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
        }

        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target,
                        etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
                        cmdbuf->vaddr);

        if (drm_debug_enabled(DRM_UT_DRIVER)) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}