linux/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helpers:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}
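
/*
 * As an illustration of the encoding above: a single-register LOAD_STATE
 * occupies two 32-bit words in the stream, a header packing the opcode,
 * a count of one and the register offset shifted right by
 * VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR, followed by the value.  E.g.
 *
 *      CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, 1);
 *
 * emits, roughly:
 *
 *      word 0: VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
 *              VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
 *              VIV_FE_LOAD_STATE_HEADER_OFFSET(VIVS_GL_PIPE_SELECT >>
 *                      VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR)
 *      word 1: 1
 */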

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
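
/*
 * CMD_SEM on its own only arms the semaphore; everywhere in this file it
 * is paired with a matching CMD_STALL so that the front end actually
 * blocks until the target engine signals:
 *
 *      CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 *      CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 */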

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}
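
/*
 * A note on the store ordering above: the FE may be spinning on the WAIT
 * in lw[0], so the argument in lw[1] is written, and made visible with a
 * barrier, first.  Whenever the FE finally observes the new command,
 * e.g. a LINK, its operand is already valid, so the FE can never branch
 * through a stale target.
 */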

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}
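
/*
 * The wrap simply restarts at the head of the ring; it is up to the
 * caller to point the FE there.  The typical pattern, as used by the
 * functions below, is:
 *
 *      target = etnaviv_buffer_reserve(gpu, buffer, dwords);
 *      ... emit at most 'dwords' 64-bit words ...
 *      etnaviv_buffer_replace_wait(buffer, waitlink_offset,
 *                                  VIV_FE_LINK_HEADER_OP_LINK |
 *                                  VIV_FE_LINK_HEADER_PREFETCH(dwords),
 *                                  target);
 */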

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                 buffer->user_size - 4);

        return buffer->user_size / 8;
}
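
/*
 * The LINK target above is computed before CMD_LINK() advances
 * user_size, so it points back at the WAIT itself, leaving the FE in an
 * idle loop at the start of the ring, roughly:
 *
 *      0x00: WAIT      <---+
 *      0x08: LINK 0x00 ----+
 *
 * The returned value (user_size / 8) is the prefetch, in 64-bit words,
 * which the caller presumably hands to the FE when starting it on this
 * loop.
 */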

u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                       VIVS_MMUv2_PTA_CONFIG_INDEX(0));

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D)
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}
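
/*
 * In the flush case the tail of the ring then looks roughly like
 *
 *      ... LINK --> SEM, STALL, FLUSH_CACHE, [TS_FLUSH_CACHE,]
 *                   SEM, STALL, END
 *
 * so the FE drains the PE and halts once the pending work has retired.
 */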

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

        /*
         * We need at most 4 dwords in the return target:
         * 1 event + 1 end + 1 wait + 1 link.
         */
        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}
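
/*
 * After the patch above the FE branches to the reserved words, raises
 * the event (which the kernel handles as the sync point work) and halts
 * on the END; the driver is then expected to restart the FE at the
 * freshly appended WAIT/LINK pair to resume the ring.
 */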

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;

        lockdep_assert_held(&gpu->lock);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        link_target = etnaviv_cmdbuf_get_va(cmdbuf);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append an MMU flush load state, followed by a new
         * link to this buffer - between two and eight additional dwords,
         * depending on the MMU version and whether the pipe changes.
         */
        if (gpu->mmu->need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (gpu->mmu->need_flush) {
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                        VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                        VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
                                        VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                        }

                        gpu->mmu->need_flush = false;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                       VIVS_GL_FLUSH_CACHE_DEPTH |
                                       VIVS_GL_FLUSH_CACHE_COLOR);
                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target, etnaviv_cmdbuf_get_va(cmdbuf),
                        cmdbuf->vaddr);

        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        gpu->lastctx = cmdbuf->ctx;
}
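
/*
 * Putting it together, after etnaviv_buffer_queue() the patched stream
 * looks roughly like this (the prefix block only exists when an MMU
 * flush or pipe switch was needed):
 *
 *      ring:   old WAIT, now a LINK -----------------+
 *                                                    v
 *      ring:   [MMU flush / pipe select,] LINK ------+
 *                                                    v
 *      cmdbuf: ... user commands ..., LINK ----------+
 *                                                    v
 *      ring:   cache flush, SEM, STALL, EVENT, WAIT, LINK back to
 *              the WAIT
 *
 * leaving the FE spinning on the new WAIT until the next submission is
 * patched in.
 */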