linux/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
        HFI_MSG_ID(HFI_H2F_MSG_INIT),
        HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
        HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
        HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
        HFI_MSG_ID(HFI_H2F_MSG_TEST),
        HFI_MSG_ID(HFI_H2F_MSG_START),
        HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
        HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
        HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

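/*
 * Read the next message from an HFI queue into 'data'. Returns the message
 * size in dwords, or 0 if the queue is empty (in which case rx_request is
 * set, presumably so the GMU signals us when it next writes to the queue).
 */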
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
        struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
        struct a6xx_hfi_queue_header *header = queue->header;
        u32 i, hdr, index = header->read_index;

        if (header->read_index == header->write_index) {
                header->rx_request = 1;
                return 0;
        }

        hdr = queue->data[index];

        queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

        /*
         * If we are to assume that the GMU firmware is in fact a rational actor
         * and is programmed to not send us a larger response than we expect
         * then we can also assume that if the header size is unexpectedly large
         * that it is due to memory corruption and/or hardware failure. In this
         * case the only reasonable course of action is to BUG() to help harden
         * the failure.
         */

        BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

        for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
                data[i] = queue->data[index];
                index = (index + 1) % header->size;
        }

        if (!gmu->legacy)
                index = ALIGN(index, 4) % header->size;

        header->read_index = index;
        return HFI_HEADER_SIZE(hdr);
}

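/*
 * Write a message to an HFI queue and ring the host-to-GMU doorbell
 * interrupt. Returns -ENOSPC if the ring buffer doesn't have enough room
 * for the whole message.
 */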
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
        struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
        struct a6xx_hfi_queue_header *header = queue->header;
        u32 i, space, index = header->write_index;

        spin_lock(&queue->lock);

        space = CIRC_SPACE(header->write_index, header->read_index,
                header->size);
        if (space < dwords) {
                header->dropped++;
                spin_unlock(&queue->lock);
                return -ENOSPC;
        }

        queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

        for (i = 0; i < dwords; i++) {
                queue->data[index] = data[i];
                index = (index + 1) % header->size;
        }

        /* Pad any unused space at the end of the message with a cookie value */
        if (!gmu->legacy) {
                for (; index % 4; index = (index + 1) % header->size)
                        queue->data[index] = 0xfafafafa;
        }

        header->write_index = index;
        spin_unlock(&queue->lock);

        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
        return 0;
}

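/*
 * Wait for the GMU to acknowledge the message identified by 'id' and
 * 'seqnum'. HFI_F2H_MSG_ERROR packets and responses with a stale sequence
 * number are logged and skipped; the first matching response has its error
 * field checked and its payload copied out to 'payload' (if provided).
 */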
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
                u32 *payload, u32 payload_size)
{
        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
        u32 val;
        int ret;

        /* Wait for a response */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev,
                        "Message %s id %d timed out waiting for response\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return -ETIMEDOUT;
        }

        /* Clear the interrupt */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
                A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

        for (;;) {
                struct a6xx_hfi_msg_response resp;

                /* Get the next packet */
                ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
                        sizeof(resp) >> 2);

                /* If the queue is empty our response never made it */
                if (!ret) {
                        DRM_DEV_ERROR(gmu->dev,
                                "The HFI response queue is unexpectedly empty\n");

                        return -ENOENT;
                }

                if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
                        struct a6xx_hfi_msg_error *error =
                                (struct a6xx_hfi_msg_error *) &resp;

                        DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
                                error->code);
                        continue;
                }

                if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
                        DRM_DEV_ERROR(gmu->dev,
                                "Unexpected message seqnum %d on the response queue\n",
                                HFI_HEADER_SEQNUM(resp.ret_header));
                        continue;
                }

                if (resp.error) {
                        DRM_DEV_ERROR(gmu->dev,
                                "Message %s id %d returned error %d\n",
                                a6xx_hfi_msg_id[id], seqnum, resp.error);
                        return -EINVAL;
                }

                /* All is well, copy over the buffer */
                if (payload && payload_size)
                        memcpy(payload, resp.payload,
                                min_t(u32, payload_size, sizeof(resp.payload)));

                return 0;
        }
}

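/*
 * Build the one-dword HFI message header, send the message on the command
 * queue and wait for the ack. The header layout, inferred from the shifts
 * used below, appears to be:
 *
 *   [31:20] sequence number   [19:16] type (HFI_MSG_CMD)
 *   [15:8]  size in dwords    [7:0]   message id
 */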
static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
                void *data, u32 size, u32 *payload, u32 payload_size)
{
        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
        int ret, dwords = size >> 2;
        u32 seqnum;

        seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

        /* First dword of the message is the message header - fill it in */
        *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
                (dwords << 8) | id;

        ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return ret;
        }

        return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
        struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

        msg.dbg_buffer_addr = (u32) gmu->debug.iova;
        msg.dbg_buffer_size = (u32) gmu->debug.size;
        msg.boot_state = boot_state;

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
                NULL, 0);
}

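/*
 * Exchange HFI versions with the firmware. The encoding below appears to put
 * the major version in bits 31:28 and the minor version in bits 27:16, so
 * (1 << 28) | (1 << 16) requests version 1.1.
 */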
static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
        struct a6xx_hfi_msg_fw_version msg = { 0 };

        /* Currently supporting version 1.1 */
        msg.supported_version = (1 << 28) | (1 << 16);

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
                version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
        int i;

        msg.num_gpu_levels = gmu->nr_gpu_freqs;
        msg.num_gmu_levels = gmu->nr_gmu_freqs;

        for (i = 0; i < gmu->nr_gpu_freqs; i++) {
                msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
                msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
        }

        for (i = 0; i < gmu->nr_gmu_freqs; i++) {
                msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
                msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
        }

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
                NULL, 0);
}

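/*
 * Current (non-legacy) form of the perf table. The only difference from the
 * v1 message is the per-level 'acd' field; 0xffffffff presumably leaves ACD
 * disabled for that level.
 */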
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_perf_table msg = { 0 };
        int i;

        msg.num_gpu_levels = gmu->nr_gpu_freqs;
        msg.num_gmu_levels = gmu->nr_gmu_freqs;

        for (i = 0; i < gmu->nr_gpu_freqs; i++) {
                msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
                msg.gx_votes[i].acd = 0xffffffff;
                msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
        }

        for (i = 0; i < gmu->nr_gmu_freqs; i++) {
                msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
                msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
        }

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
                NULL, 0);
}

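/*
 * Per-target bandwidth tables. The addresses appear to be bus controller
 * register offsets and the data dwords the matching votes; both are known
 * and fixed for each SoC, which is why they can be hard coded here.
 */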
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x01;

        msg->ddr_cmds_addrs[0] = 0x50000;
        msg->ddr_cmds_addrs[1] = 0x5003c;
        msg->ddr_cmds_addrs[2] = 0x5000c;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes - these are used by the GMU but the
         * votes are known and fixed for the target
         */
        msg->cnoc_cmds_num = 1;
        msg->cnoc_wait_bitmask = 0x01;

        msg->cnoc_cmds_addrs[0] = 0x5007c;
        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /*
         * Send a single "off" entry just to get things running
         * TODO: bus scaling
         */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x01;

        msg->ddr_cmds_addrs[0] = 0x50000;
        msg->ddr_cmds_addrs[1] = 0x5003c;
        msg->ddr_cmds_addrs[2] = 0x5000c;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes - these are used by the GMU but the
         * votes are known and fixed for the target
         */
        msg->cnoc_cmds_num = 3;
        msg->cnoc_wait_bitmask = 0x01;

        msg->cnoc_cmds_addrs[0] = 0x50034;
        msg->cnoc_cmds_addrs[1] = 0x5007c;
        msg->cnoc_cmds_addrs[2] = 0x5004c;

        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[0][1] =  0x00000000;
        msg->cnoc_cmds_data[0][2] =  0x40000000;

        msg->cnoc_cmds_data[1][0] =  0x60000001;
        msg->cnoc_cmds_data[1][1] =  0x20000001;
        msg->cnoc_cmds_data[1][2] =  0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /*
         * Send a single "off" entry just to get things running
         * TODO: bus scaling
         */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x01;

        msg->ddr_cmds_addrs[0] = 0x50000;
        msg->ddr_cmds_addrs[1] = 0x50004;
        msg->ddr_cmds_addrs[2] = 0x5007c;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes - these are used by the GMU but the
         * votes are known and fixed for the target
         */
        msg->cnoc_cmds_num = 1;
        msg->cnoc_wait_bitmask = 0x01;

        msg->cnoc_cmds_addrs[0] = 0x500a4;
        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /*
         * Send a single "off" entry just to get things running
         * TODO: bus scaling
         */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x01;

        msg->ddr_cmds_addrs[0] = 0x50004;
        msg->ddr_cmds_addrs[1] = 0x500a0;
        msg->ddr_cmds_addrs[2] = 0x50000;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes - these are used by the GMU but the
         * votes are known and fixed for the target
         */
        msg->cnoc_cmds_num = 1;
        msg->cnoc_wait_bitmask = 0x01;

        msg->cnoc_cmds_addrs[0] = 0x50070;
        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /*
         * Send a single "off" entry just to get things running
         * TODO: bus scaling
         */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x07;

        msg->ddr_cmds_addrs[0] = 0x50004;
        msg->ddr_cmds_addrs[1] = 0x50000;
        msg->ddr_cmds_addrs[2] = 0x50088;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes - these are used by the GMU but the
         * votes are known and fixed for the target
         */
        msg->cnoc_cmds_num = 1;
        msg->cnoc_wait_bitmask = 0x01;

        msg->cnoc_cmds_addrs[0] = 0x5006c;
        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
        /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
        msg->bw_level_num = 1;

        msg->ddr_cmds_num = 3;
        msg->ddr_wait_bitmask = 0x07;

        msg->ddr_cmds_addrs[0] = 0x50000;
        msg->ddr_cmds_addrs[1] = 0x5005c;
        msg->ddr_cmds_addrs[2] = 0x5000c;

        msg->ddr_cmds_data[0][0] =  0x40000000;
        msg->ddr_cmds_data[0][1] =  0x40000000;
        msg->ddr_cmds_data[0][2] =  0x40000000;

        /*
         * These are the CX (CNOC) votes.  This is used but the values for the
         * sdm845 GMU are known and fixed so we can hard code them.
         */

        msg->cnoc_cmds_num = 3;
        msg->cnoc_wait_bitmask = 0x05;

        msg->cnoc_cmds_addrs[0] = 0x50034;
        msg->cnoc_cmds_addrs[1] = 0x5007c;
        msg->cnoc_cmds_addrs[2] = 0x5004c;

        msg->cnoc_cmds_data[0][0] =  0x40000000;
        msg->cnoc_cmds_data[0][1] =  0x00000000;
        msg->cnoc_cmds_data[0][2] =  0x40000000;

        msg->cnoc_cmds_data[1][0] =  0x60000001;
        msg->cnoc_cmds_data[1][1] =  0x20000001;
        msg->cnoc_cmds_data[1][2] =  0x60000001;
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_bw_table msg = { 0 };
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

        if (adreno_is_a618(adreno_gpu))
                a618_build_bw_table(&msg);
        else if (adreno_is_a640_family(adreno_gpu))
                a640_build_bw_table(&msg);
        else if (adreno_is_a650(adreno_gpu))
                a650_build_bw_table(&msg);
        else if (adreno_is_7c3(adreno_gpu))
                adreno_7c3_build_bw_table(&msg);
        else if (adreno_is_a660(adreno_gpu))
                a660_build_bw_table(&msg);
        else
                a6xx_build_bw_table(&msg);

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
                NULL, 0);
}

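/*
 * On the legacy (v1) flow the TEST message is the last HFI message sent
 * during boot; see a6xx_hfi_start_v1() below.
 */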
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_test msg = { 0 };

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
                NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_start msg = { 0 };

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
                NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_core_fw_start msg = { 0 };

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
                sizeof(msg), NULL, 0);
}

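/*
 * Vote for GPU performance level 'index' from the perf table sent earlier.
 * The vote is marked blocking (ack_type = 1), presumably so the GMU finishes
 * the frequency switch before acknowledging the message.
 */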
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
        struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

        msg.ack_type = 1; /* blocking */
        msg.freq = index;
        msg.bw = 0; /* TODO: bus scaling */

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
                sizeof(msg), NULL, 0);
}

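/* Tell the GMU to get ready to enter its low power (slumber) state */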
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

        /* TODO: should freq and bw fields be non-zero ? */

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
                sizeof(msg), NULL, 0);
}

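/* Legacy (v1) boot sequence: INIT, FW_VERSION, PERF_TABLE, BW_TABLE, TEST */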
static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
        int ret;

        ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
        if (ret)
                return ret;

        ret = a6xx_hfi_get_fw_version(gmu, NULL);
        if (ret)
                return ret;

        /*
         * We have to exchange version numbers per the boot sequence, but at
         * this point the kernel driver doesn't need to know the exact version
         * of the GMU firmware
         */

        ret = a6xx_hfi_send_perf_table_v1(gmu);
        if (ret)
                return ret;

        ret = a6xx_hfi_send_bw_table(gmu);
        if (ret)
                return ret;

        /*
         * Let the GMU know that there won't be any more HFI messages until next
         * boot
         */
        a6xx_hfi_send_test(gmu);

        return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
        int ret;

        if (gmu->legacy)
                return a6xx_hfi_start_v1(gmu, boot_state);

        ret = a6xx_hfi_send_perf_table(gmu);
        if (ret)
                return ret;

        ret = a6xx_hfi_send_bw_table(gmu);
        if (ret)
                return ret;

        ret = a6xx_hfi_send_core_fw_start(gmu);
        if (ret)
                return ret;

        /*
         * The downstream driver sends this in its "a6xx_hw_init" equivalent,
         * but there seems to be no harm in sending it here
         */
        ret = a6xx_hfi_send_start(gmu);
        if (ret)
                return ret;

        return 0;
}

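/*
 * Check that both queues drained (the GMU consumed everything we sent and we
 * read every response) and reset the indexes for the next boot.
 */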
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
                struct a6xx_hfi_queue *queue = &gmu->queues[i];

                if (!queue->header)
                        continue;

                if (queue->header->read_index != queue->header->write_index)
                        DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

                queue->header->read_index = 0;
                queue->header->write_index = 0;

                memset(&queue->history, 0xff, sizeof(queue->history));
                queue->history_idx = 0;
        }
}

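/* Initialize the host-side queue state and the shared queue header */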
static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
                struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
                u32 id)
{
        spin_lock_init(&queue->lock);
        queue->header = header;
        queue->data = virt;
        atomic_set(&queue->seqnum, 0);

        memset(&queue->history, 0xff, sizeof(queue->history));
        queue->history_idx = 0;

        /* Set up the shared memory header */
        header->iova = iova;
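        /* The type field appears to encode a queue type (10) plus the queue id */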
        header->type = 10 << 8 | id;
        header->status = 1;
        header->size = SZ_4K >> 2;
        header->msg_size = 0;
        header->dropped = 0;
        header->rx_watermark = 1;
        header->tx_watermark = 1;
        header->rx_request = 1;
        header->tx_request = 0;
        header->read_index = 0;
        header->write_index = 0;
}

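/*
 * Carve up the single HFI buffer: the first 4K holds the queue table header
 * followed by the per-queue headers, and each queue then gets a 4K ring of
 * its own.
 */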
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gmu_bo *hfi = &gmu->hfi;
        struct a6xx_hfi_queue_table_header *table = hfi->virt;
        struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
        u64 offset;
        int table_size;

        /*
         * The table size is the size of the table header plus all of the queue
         * headers
         */
        table_size = sizeof(*table);
        table_size += (ARRAY_SIZE(gmu->queues) *
                sizeof(struct a6xx_hfi_queue_header));

        table->version = 0;
        table->size = table_size;
        /* First queue header is located immediately after the table header */
        table->qhdr0_offset = sizeof(*table) >> 2;
        table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
        table->num_queues = ARRAY_SIZE(gmu->queues);
        table->active_queues = ARRAY_SIZE(gmu->queues);

        /* Command queue */
        offset = SZ_4K;
        a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
                hfi->iova + offset, 0);

        /* GMU response queue */
        offset += SZ_4K;
        a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
                hfi->iova + offset, gmu->legacy ? 4 : 1);
}