linux/drivers/media/platform/qcom/venus/hfi_venus.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2017 Linaro Ltd.
   5 */
   6
   7#include <linux/delay.h>
   8#include <linux/device.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/interrupt.h>
  11#include <linux/iopoll.h>
  12#include <linux/kernel.h>
  13#include <linux/slab.h>
  14
  15#include "core.h"
  16#include "hfi_cmds.h"
  17#include "hfi_msgs.h"
  18#include "hfi_venus.h"
  19#include "hfi_venus_io.h"
  20#include "firmware.h"
  21
  22#define HFI_MASK_QHDR_TX_TYPE           0xff000000
  23#define HFI_MASK_QHDR_RX_TYPE           0x00ff0000
  24#define HFI_MASK_QHDR_PRI_TYPE          0x0000ff00
  25#define HFI_MASK_QHDR_ID_TYPE           0x000000ff
  26
  27#define HFI_HOST_TO_CTRL_CMD_Q          0
  28#define HFI_CTRL_TO_HOST_MSG_Q          1
  29#define HFI_CTRL_TO_HOST_DBG_Q          2
  30#define HFI_MASK_QHDR_STATUS            0x000000ff
  31
  32#define IFACEQ_NUM                      3
  33#define IFACEQ_CMD_IDX                  0
  34#define IFACEQ_MSG_IDX                  1
  35#define IFACEQ_DBG_IDX                  2
  36#define IFACEQ_MAX_BUF_COUNT            50
  37#define IFACEQ_MAX_PARALLEL_CLNTS       16
  38#define IFACEQ_DFLT_QHDR                0x01010000
  39
  40#define POLL_INTERVAL_US                50
  41
  42#define IFACEQ_MAX_PKT_SIZE             1024
  43#define IFACEQ_MED_PKT_SIZE             768
  44#define IFACEQ_MIN_PKT_SIZE             8
  45#define IFACEQ_VAR_SMALL_PKT_SIZE       100
  46#define IFACEQ_VAR_LARGE_PKT_SIZE       512
  47#define IFACEQ_VAR_HUGE_PKT_SIZE        (1024 * 12)
  48
  49struct hfi_queue_table_header {
  50        u32 version;
  51        u32 size;
  52        u32 qhdr0_offset;
  53        u32 qhdr_size;
  54        u32 num_q;
  55        u32 num_active_q;
  56};
  57
  58struct hfi_queue_header {
  59        u32 status;
  60        u32 start_addr;
  61        u32 type;
  62        u32 q_size;
  63        u32 pkt_size;
  64        u32 pkt_drop_cnt;
  65        u32 rx_wm;
  66        u32 tx_wm;
  67        u32 rx_req;
  68        u32 tx_req;
  69        u32 rx_irq_status;
  70        u32 tx_irq_status;
  71        u32 read_idx;
  72        u32 write_idx;
  73};
  74
  75#define IFACEQ_TABLE_SIZE       \
  76        (sizeof(struct hfi_queue_table_header) +        \
  77         sizeof(struct hfi_queue_header) * IFACEQ_NUM)
  78
  79#define IFACEQ_QUEUE_SIZE       (IFACEQ_MAX_PKT_SIZE *  \
  80        IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
  81
  82#define IFACEQ_GET_QHDR_START_ADDR(ptr, i)      \
  83        (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +      \
  84                ((i) * sizeof(struct hfi_queue_header)))
  85
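    /*
     * Sizes of the shared memory region handed to the firmware: the queue
     * table (header plus three queue headers) is followed by the command,
     * message and debug ring buffers, one IFACEQ_QUEUE_SIZE each. The SFR
     * (subsystem failure reason) buffer is a separate allocation, and
     * SHARED_QSIZE is the overall UC region size programmed in venus_run().
     */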
  86#define QDSS_SIZE               SZ_4K
  87#define SFR_SIZE                SZ_4K
  88#define QUEUE_SIZE              \
  89        (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
  90
  91#define ALIGNED_QDSS_SIZE       ALIGN(QDSS_SIZE, SZ_4K)
  92#define ALIGNED_SFR_SIZE        ALIGN(SFR_SIZE, SZ_4K)
  93#define ALIGNED_QUEUE_SIZE      ALIGN(QUEUE_SIZE, SZ_4K)
  94#define SHARED_QSIZE            ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
  95                                      ALIGNED_QDSS_SIZE, SZ_1M)
  96
  97struct mem_desc {
  98        dma_addr_t da;  /* device address */
  99        void *kva;      /* kernel virtual address */
 100        u32 size;
 101        unsigned long attrs;
 102};
 103
 104struct iface_queue {
 105        struct hfi_queue_header *qhdr;
 106        struct mem_desc qmem;
 107};
 108
 109enum venus_state {
 110        VENUS_STATE_DEINIT = 1,
 111        VENUS_STATE_INIT,
 112};
 113
 114struct venus_hfi_device {
 115        struct venus_core *core;
 116        u32 irq_status;
 117        u32 last_packet_type;
 118        bool power_enabled;
 119        bool suspended;
 120        enum venus_state state;
 121        /* serialize read / write to the shared memory */
 122        struct mutex lock;
 123        struct completion pwr_collapse_prep;
 124        struct completion release_resource;
 125        struct mem_desc ifaceq_table;
 126        struct mem_desc sfr;
 127        struct iface_queue queues[IFACEQ_NUM];
 128        u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
 129        u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
 130};
 131
 132static bool venus_pkt_debug;
 133int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
 134static bool venus_sys_idle_indicator;
 135static bool venus_fw_low_power_mode = true;
 136static int venus_hw_rsp_timeout = 1000;
 137static bool venus_fw_coverage;
 138
 139static void venus_set_state(struct venus_hfi_device *hdev,
 140                            enum venus_state state)
 141{
 142        mutex_lock(&hdev->lock);
 143        hdev->state = state;
 144        mutex_unlock(&hdev->lock);
 145}
 146
 147static bool venus_is_valid_state(struct venus_hfi_device *hdev)
 148{
 149        return hdev->state != VENUS_STATE_DEINIT;
 150}
 151
 152static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
 153{
 154        size_t pkt_size = *(u32 *)packet;
 155
 156        if (!venus_pkt_debug)
 157                return;
 158
 159        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
 160                       pkt_size, true);
 161}
 162
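    /*
     * Write one HFI packet into a host-to-firmware ring buffer. The first
     * u32 of every packet is its size in bytes, while the queue header
     * indices and q_size are counted in u32 words; a packet that does not
     * fit before the end of the ring wraps around to the start. Barriers
     * order the payload copy against the write_idx update, and *rx_req
     * reports the queue's rx_req flag so the caller knows whether to raise
     * a soft interrupt.
     */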
 163static int venus_write_queue(struct venus_hfi_device *hdev,
 164                             struct iface_queue *queue,
 165                             void *packet, u32 *rx_req)
 166{
 167        struct hfi_queue_header *qhdr;
 168        u32 dwords, new_wr_idx;
 169        u32 empty_space, rd_idx, wr_idx, qsize;
 170        u32 *wr_ptr;
 171
 172        if (!queue->qmem.kva)
 173                return -EINVAL;
 174
 175        qhdr = queue->qhdr;
 176        if (!qhdr)
 177                return -EINVAL;
 178
 179        venus_dump_packet(hdev, packet);
 180
 181        dwords = (*(u32 *)packet) >> 2;
 182        if (!dwords)
 183                return -EINVAL;
 184
 185        rd_idx = qhdr->read_idx;
 186        wr_idx = qhdr->write_idx;
 187        qsize = qhdr->q_size;
 188        /* ensure rd/wr indices are read from memory */
 189        rmb();
 190
 191        if (wr_idx >= rd_idx)
 192                empty_space = qsize - (wr_idx - rd_idx);
 193        else
 194                empty_space = rd_idx - wr_idx;
 195
 196        if (empty_space <= dwords) {
 197                qhdr->tx_req = 1;
 198                /* ensure tx_req is updated in memory */
 199                wmb();
 200                return -ENOSPC;
 201        }
 202
 203        qhdr->tx_req = 0;
 204        /* ensure tx_req is updated in memory */
 205        wmb();
 206
 207        new_wr_idx = wr_idx + dwords;
 208        wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
 209        if (new_wr_idx < qsize) {
 210                memcpy(wr_ptr, packet, dwords << 2);
 211        } else {
 212                size_t len;
 213
 214                new_wr_idx -= qsize;
 215                len = (dwords - new_wr_idx) << 2;
 216                memcpy(wr_ptr, packet, len);
 217                memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
 218        }
 219
 220        /* make sure packet is written before updating the write index */
 221        wmb();
 222
 223        qhdr->write_idx = new_wr_idx;
 224        *rx_req = qhdr->rx_req ? 1 : 0;
 225
 226        /* make sure write index is updated before an interrupt is raised */
 227        mb();
 228
 229        return 0;
 230}
 231
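    /*
     * Read one packet from a firmware-to-host ring buffer into pkt (at most
     * IFACEQ_VAR_HUGE_PKT_SIZE bytes). An empty queue returns -ENODATA; a
     * packet with an implausible size field is dropped by advancing
     * read_idx to write_idx and -EBADMSG is returned. *tx_req reports the
     * queue's tx_req flag so the caller knows whether to raise a soft
     * interrupt towards the firmware.
     */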
 232static int venus_read_queue(struct venus_hfi_device *hdev,
 233                            struct iface_queue *queue, void *pkt, u32 *tx_req)
 234{
 235        struct hfi_queue_header *qhdr;
 236        u32 dwords, new_rd_idx;
 237        u32 rd_idx, wr_idx, type, qsize;
 238        u32 *rd_ptr;
 239        u32 recv_request = 0;
 240        int ret = 0;
 241
 242        if (!queue->qmem.kva)
 243                return -EINVAL;
 244
 245        qhdr = queue->qhdr;
 246        if (!qhdr)
 247                return -EINVAL;
 248
 249        type = qhdr->type;
 250        rd_idx = qhdr->read_idx;
 251        wr_idx = qhdr->write_idx;
 252        qsize = qhdr->q_size;
 253
 254        /* make sure data is valid before using it */
 255        rmb();
 256
 257        /*
 258         * Do not set the receive request for the debug queue; if set, Venus
 259         * generates an interrupt for debug messages even when there is no
 260         * response message available. In general the debug queue will not
 261         * become full, as it is emptied on every interrupt from Venus, and
 262         * Venus generates an interrupt anyway if it does fill up.
 263         */
 264        if (type & HFI_CTRL_TO_HOST_MSG_Q)
 265                recv_request = 1;
 266
 267        if (rd_idx == wr_idx) {
 268                qhdr->rx_req = recv_request;
 269                *tx_req = 0;
 270                /* update rx_req field in memory */
 271                wmb();
 272                return -ENODATA;
 273        }
 274
 275        rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
 276        dwords = *rd_ptr >> 2;
 277        if (!dwords)
 278                return -EINVAL;
 279
 280        new_rd_idx = rd_idx + dwords;
 281        if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
 282                if (new_rd_idx < qsize) {
 283                        memcpy(pkt, rd_ptr, dwords << 2);
 284                } else {
 285                        size_t len;
 286
 287                        new_rd_idx -= qsize;
 288                        len = (dwords - new_rd_idx) << 2;
 289                        memcpy(pkt, rd_ptr, len);
 290                        memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
 291                }
 292        } else {
 293                /* bad packet received, dropping */
 294                new_rd_idx = qhdr->write_idx;
 295                ret = -EBADMSG;
 296        }
 297
 298        /* ensure the packet is read before updating read index */
 299        rmb();
 300
 301        qhdr->read_idx = new_rd_idx;
 302        /* ensure the read index is updated in memory */
 303        wmb();
 304
 305        rd_idx = qhdr->read_idx;
 306        wr_idx = qhdr->write_idx;
 307        /* ensure rd/wr indices are read from memory */
 308        rmb();
 309
 310        if (rd_idx != wr_idx)
 311                qhdr->rx_req = 0;
 312        else
 313                qhdr->rx_req = recv_request;
 314
 315        *tx_req = qhdr->tx_req ? 1 : 0;
 316
 317        /* ensure rx_req is stored to memory and tx_req is loaded from memory */
 318        mb();
 319
 320        venus_dump_packet(hdev, pkt);
 321
 322        return ret;
 323}
 324
 325static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
 326                       u32 size)
 327{
 328        struct device *dev = hdev->core->dev;
 329
 330        desc->attrs = DMA_ATTR_WRITE_COMBINE;
 331        desc->size = ALIGN(size, SZ_4K);
 332
 333        desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
 334                                    desc->attrs);
 335        if (!desc->kva)
 336                return -ENOMEM;
 337
 338        return 0;
 339}
 340
 341static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
 342{
 343        struct device *dev = hdev->core->dev;
 344
 345        dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
 346}
 347
 348static void venus_set_registers(struct venus_hfi_device *hdev)
 349{
 350        const struct venus_resources *res = hdev->core->res;
 351        const struct reg_val *tbl = res->reg_tbl;
 352        unsigned int count = res->reg_tbl_size;
 353        unsigned int i;
 354
 355        for (i = 0; i < count; i++)
 356                writel(tbl[i].value, hdev->core->base + tbl[i].reg);
 357}
 358
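    /*
     * Raise the host-to-firmware (H2A) soft interrupt to tell the video
     * firmware that the host has updated the shared queues.
     */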
 359static void venus_soft_int(struct venus_hfi_device *hdev)
 360{
 361        void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
 362        u32 clear_bit;
 363
 364        if (IS_V6(hdev->core))
 365                clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
 366        else
 367                clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
 368
 369        writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
 370}
 371
 372static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
 373                                         void *pkt, bool sync)
 374{
 375        struct device *dev = hdev->core->dev;
 376        struct hfi_pkt_hdr *cmd_packet;
 377        struct iface_queue *queue;
 378        u32 rx_req;
 379        int ret;
 380
 381        if (!venus_is_valid_state(hdev))
 382                return -EINVAL;
 383
 384        cmd_packet = (struct hfi_pkt_hdr *)pkt;
 385        hdev->last_packet_type = cmd_packet->pkt_type;
 386
 387        queue = &hdev->queues[IFACEQ_CMD_IDX];
 388
 389        ret = venus_write_queue(hdev, queue, pkt, &rx_req);
 390        if (ret) {
 391                dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
 392                return ret;
 393        }
 394
 395        if (sync) {
 396                /*
 397                 * Inform the video hardware to raise an interrupt for
 398                 * synchronous commands.
 399                 */
 400                queue = &hdev->queues[IFACEQ_MSG_IDX];
 401                queue->qhdr->rx_req = 1;
 402                /* ensure rx_req is updated in memory */
 403                wmb();
 404        }
 405
 406        if (rx_req)
 407                venus_soft_int(hdev);
 408
 409        return 0;
 410}
 411
 412static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
 413{
 414        int ret;
 415
 416        mutex_lock(&hdev->lock);
 417        ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
 418        mutex_unlock(&hdev->lock);
 419
 420        return ret;
 421}
 422
 423static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
 424                                       u32 size, u32 addr, void *cookie)
 425{
 426        struct venus_hfi_device *hdev = to_hfi_priv(core);
 427        struct hfi_sys_set_resource_pkt *pkt;
 428        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
 429        int ret;
 430
 431        if (id == VIDC_RESOURCE_NONE)
 432                return 0;
 433
 434        pkt = (struct hfi_sys_set_resource_pkt *)packet;
 435
 436        ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
 437        if (ret)
 438                return ret;
 439
 440        ret = venus_iface_cmdq_write(hdev, pkt, false);
 441        if (ret)
 442                return ret;
 443
 444        return 0;
 445}
 446
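    /*
     * Start the firmware: set VIDC_CTRL_INIT, unmask the relevant wrapper
     * interrupts and poll CPU_CS_SCIACMDARG0 (up to max_tries iterations of
     * 0.5-1 ms) until the firmware reports a non-zero status. An error
     * status of 4 indicates an invalid UC_REGION setting.
     */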
 447static int venus_boot_core(struct venus_hfi_device *hdev)
 448{
 449        struct device *dev = hdev->core->dev;
 450        static const unsigned int max_tries = 100;
 451        u32 ctrl_status = 0, mask_val;
 452        unsigned int count = 0;
 453        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
 454        void __iomem *wrapper_base = hdev->core->wrapper_base;
 455        int ret = 0;
 456
 457        writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
 458        if (IS_V6(hdev->core)) {
 459                mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
 460                mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
 461                              WRAPPER_INTR_MASK_A2HCPU_MASK);
 462        } else {
 463                mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
 464        }
 465        writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
 466        writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
 467
 468        while (!ctrl_status && count < max_tries) {
 469                ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
 470                if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
 471                        dev_err(dev, "invalid setting for UC_REGION\n");
 472                        ret = -EINVAL;
 473                        break;
 474                }
 475
 476                usleep_range(500, 1000);
 477                count++;
 478        }
 479
 480        if (count >= max_tries)
 481                ret = -ETIMEDOUT;
 482
 483        if (IS_V6(hdev->core)) {
 484                writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
 485                writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
 486        }
 487
 488        return ret;
 489}
 490
 491static u32 venus_hwversion(struct venus_hfi_device *hdev)
 492{
 493        struct device *dev = hdev->core->dev;
 494        void __iomem *wrapper_base = hdev->core->wrapper_base;
 495        u32 ver;
 496        u32 major, minor, step;
 497
 498        ver = readl(wrapper_base + WRAPPER_HW_VERSION);
 499        major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
 500        major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
 501        minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
 502        minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
 503        step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
 504
 505        dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
 506
 507        return major;
 508}
 509
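    /*
     * Bring the core up after power-on: re-program the platform register
     * table, the shared queue region address/size and the SFR address,
     * then boot the firmware and log the hardware version.
     */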
 510static int venus_run(struct venus_hfi_device *hdev)
 511{
 512        struct device *dev = hdev->core->dev;
 513        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
 514        int ret;
 515
 516        /*
 517         * Re-program all of the registers that get reset as a result of
 518         * regulator_disable() and _enable()
 519         */
 520        venus_set_registers(hdev);
 521
 522        writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
 523        writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
 524        writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
 525        writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
 526        if (hdev->sfr.da)
 527                writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
 528
 529        ret = venus_boot_core(hdev);
 530        if (ret) {
 531                dev_err(dev, "failed to reset venus core\n");
 532                return ret;
 533        }
 534
 535        venus_hwversion(hdev);
 536
 537        return 0;
 538}
 539
 540static int venus_halt_axi(struct venus_hfi_device *hdev)
 541{
 542        void __iomem *wrapper_base = hdev->core->wrapper_base;
 543        void __iomem *vbif_base = hdev->core->vbif_base;
 544        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
 545        void __iomem *aon_base = hdev->core->aon_base;
 546        struct device *dev = hdev->core->dev;
 547        u32 val;
 548        u32 mask_val;
 549        int ret;
 550
 551        if (IS_V6(hdev->core)) {
 552                writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
 553
 554                writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
 555                ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
 556                                         val,
 557                                         val & BIT(0),
 558                                         POLL_INTERVAL_US,
 559                                         VBIF_AXI_HALT_ACK_TIMEOUT_US);
 560                if (ret)
 561                        return -ETIMEDOUT;
 562
 563                mask_val = (BIT(2) | BIT(1) | BIT(0));
 564                writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
 565
 566                writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
 567                ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
 568                                         val,
 569                                         val == 0,
 570                                         POLL_INTERVAL_US,
 571                                         VBIF_AXI_HALT_ACK_TIMEOUT_US);
 572
 573                if (ret) {
 574                        dev_err(dev, "DBLP Release: lpi_status %x\n", val);
 575                        return -ETIMEDOUT;
 576                }
 577                return 0;
 578        }
 579
 580        if (IS_V4(hdev->core)) {
 581                val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
 582                val |= WRAPPER_CPU_AXI_HALT_HALT;
 583                writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
 584
 585                ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
 586                                         val,
 587                                         val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
 588                                         POLL_INTERVAL_US,
 589                                         VBIF_AXI_HALT_ACK_TIMEOUT_US);
 590                if (ret) {
 591                        dev_err(dev, "AXI bus port halt timeout\n");
 592                        return ret;
 593                }
 594
 595                return 0;
 596        }
 597
 598        /* Halt AXI and AXI IMEM VBIF Access */
 599        val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
 600        val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
 601        writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
 602
 603        /* Wait for the AXI bus port halt to be acknowledged */
 604        ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
 605                                 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
 606                                 POLL_INTERVAL_US,
 607                                 VBIF_AXI_HALT_ACK_TIMEOUT_US);
 608        if (ret) {
 609                dev_err(dev, "AXI bus port halt timeout\n");
 610                return ret;
 611        }
 612
 613        return 0;
 614}
 615
 616static int venus_power_off(struct venus_hfi_device *hdev)
 617{
 618        int ret;
 619
 620        if (!hdev->power_enabled)
 621                return 0;
 622
 623        ret = venus_set_hw_state_suspend(hdev->core);
 624        if (ret)
 625                return ret;
 626
 627        ret = venus_halt_axi(hdev);
 628        if (ret)
 629                return ret;
 630
 631        hdev->power_enabled = false;
 632
 633        return 0;
 634}
 635
 636static int venus_power_on(struct venus_hfi_device *hdev)
 637{
 638        int ret;
 639
 640        if (hdev->power_enabled)
 641                return 0;
 642
 643        ret = venus_set_hw_state_resume(hdev->core);
 644        if (ret)
 645                goto err;
 646
 647        ret = venus_run(hdev);
 648        if (ret)
 649                goto err_suspend;
 650
 651        hdev->power_enabled = true;
 652
 653        return 0;
 654
 655err_suspend:
 656        venus_set_hw_state_suspend(hdev->core);
 657err:
 658        hdev->power_enabled = false;
 659        return ret;
 660}
 661
 662static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
 663                                        void *pkt)
 664{
 665        struct iface_queue *queue;
 666        u32 tx_req;
 667        int ret;
 668
 669        if (!venus_is_valid_state(hdev))
 670                return -EINVAL;
 671
 672        queue = &hdev->queues[IFACEQ_MSG_IDX];
 673
 674        ret = venus_read_queue(hdev, queue, pkt, &tx_req);
 675        if (ret)
 676                return ret;
 677
 678        if (tx_req)
 679                venus_soft_int(hdev);
 680
 681        return 0;
 682}
 683
 684static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
 685{
 686        int ret;
 687
 688        mutex_lock(&hdev->lock);
 689        ret = venus_iface_msgq_read_nolock(hdev, pkt);
 690        mutex_unlock(&hdev->lock);
 691
 692        return ret;
 693}
 694
 695static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
 696                                        void *pkt)
 697{
 698        struct iface_queue *queue;
 699        u32 tx_req;
 700        int ret;
 701
 702        ret = venus_is_valid_state(hdev);
 703        if (!ret)
 704                return -EINVAL;
 705
 706        queue = &hdev->queues[IFACEQ_DBG_IDX];
 707
 708        ret = venus_read_queue(hdev, queue, pkt, &tx_req);
 709        if (ret)
 710                return ret;
 711
 712        if (tx_req)
 713                venus_soft_int(hdev);
 714
 715        return 0;
 716}
 717
 718static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
 719{
 720        int ret;
 721
 722        if (!pkt)
 723                return -EINVAL;
 724
 725        mutex_lock(&hdev->lock);
 726        ret = venus_iface_dbgq_read_nolock(hdev, pkt);
 727        mutex_unlock(&hdev->lock);
 728
 729        return ret;
 730}
 731
 732static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
 733{
 734        qhdr->status = 1;
 735        qhdr->type = IFACEQ_DFLT_QHDR;
 736        qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
 737        qhdr->pkt_size = 0;
 738        qhdr->rx_wm = 1;
 739        qhdr->tx_wm = 1;
 740        qhdr->rx_req = 1;
 741        qhdr->tx_req = 0;
 742        qhdr->rx_irq_status = 0;
 743        qhdr->tx_irq_status = 0;
 744        qhdr->read_idx = 0;
 745        qhdr->write_idx = 0;
 746}
 747
 748static void venus_interface_queues_release(struct venus_hfi_device *hdev)
 749{
 750        mutex_lock(&hdev->lock);
 751
 752        venus_free(hdev, &hdev->ifaceq_table);
 753        venus_free(hdev, &hdev->sfr);
 754
 755        memset(hdev->queues, 0, sizeof(hdev->queues));
 756        memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
 757        memset(&hdev->sfr, 0, sizeof(hdev->sfr));
 758
 759        mutex_unlock(&hdev->lock);
 760}
 761
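    /*
     * Carve the interface queues out of a single shared allocation and
     * publish the queue table header. Failing to allocate the SFR buffer
     * is not fatal: hdev->sfr.da stays zero and venus_run() skips
     * programming SFR_ADDR.
     */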
 762static int venus_interface_queues_init(struct venus_hfi_device *hdev)
 763{
 764        struct hfi_queue_table_header *tbl_hdr;
 765        struct iface_queue *queue;
 766        struct hfi_sfr *sfr;
 767        struct mem_desc desc = {0};
 768        unsigned int offset;
 769        unsigned int i;
 770        int ret;
 771
 772        ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
 773        if (ret)
 774                return ret;
 775
 776        hdev->ifaceq_table = desc;
 777        offset = IFACEQ_TABLE_SIZE;
 778
 779        for (i = 0; i < IFACEQ_NUM; i++) {
 780                queue = &hdev->queues[i];
 781                queue->qmem.da = desc.da + offset;
 782                queue->qmem.kva = desc.kva + offset;
 783                queue->qmem.size = IFACEQ_QUEUE_SIZE;
 784                offset += queue->qmem.size;
 785                queue->qhdr =
 786                        IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
 787
 788                venus_set_qhdr_defaults(queue->qhdr);
 789
 790                queue->qhdr->start_addr = queue->qmem.da;
 791
 792                if (i == IFACEQ_CMD_IDX)
 793                        queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
 794                else if (i == IFACEQ_MSG_IDX)
 795                        queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
 796                else if (i == IFACEQ_DBG_IDX)
 797                        queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
 798        }
 799
 800        tbl_hdr = hdev->ifaceq_table.kva;
 801        tbl_hdr->version = 0;
 802        tbl_hdr->size = IFACEQ_TABLE_SIZE;
 803        tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
 804        tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
 805        tbl_hdr->num_q = IFACEQ_NUM;
 806        tbl_hdr->num_active_q = IFACEQ_NUM;
 807
 808        /*
 809         * Set the receive request to zero on the debug queue as there is no
 810         * need for an interrupt from the video hardware for debug messages
 811         */
 812        queue = &hdev->queues[IFACEQ_DBG_IDX];
 813        queue->qhdr->rx_req = 0;
 814
 815        ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
 816        if (ret) {
 817                hdev->sfr.da = 0;
 818        } else {
 819                hdev->sfr = desc;
 820                sfr = hdev->sfr.kva;
 821                sfr->buf_size = ALIGNED_SFR_SIZE;
 822        }
 823
 824        /* ensure table and queue header structs are settled in memory */
 825        wmb();
 826
 827        return 0;
 828}
 829
 830static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
 831{
 832        struct hfi_sys_set_property_pkt *pkt;
 833        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
 834        int ret;
 835
 836        pkt = (struct hfi_sys_set_property_pkt *)packet;
 837
 838        pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
 839
 840        ret = venus_iface_cmdq_write(hdev, pkt, false);
 841        if (ret)
 842                return ret;
 843
 844        return 0;
 845}
 846
 847static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
 848{
 849        struct hfi_sys_set_property_pkt *pkt;
 850        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
 851        int ret;
 852
 853        pkt = (struct hfi_sys_set_property_pkt *)packet;
 854
 855        pkt_sys_coverage_config(pkt, mode);
 856
 857        ret = venus_iface_cmdq_write(hdev, pkt, false);
 858        if (ret)
 859                return ret;
 860
 861        return 0;
 862}
 863
 864static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
 865                                      bool enable)
 866{
 867        struct hfi_sys_set_property_pkt *pkt;
 868        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
 869        int ret;
 870
 871        if (!enable)
 872                return 0;
 873
 874        pkt = (struct hfi_sys_set_property_pkt *)packet;
 875
 876        pkt_sys_idle_indicator(pkt, enable);
 877
 878        ret = venus_iface_cmdq_write(hdev, pkt, false);
 879        if (ret)
 880                return ret;
 881
 882        return 0;
 883}
 884
 885static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
 886                                       bool enable)
 887{
 888        struct hfi_sys_set_property_pkt *pkt;
 889        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
 890        int ret;
 891
 892        pkt = (struct hfi_sys_set_property_pkt *)packet;
 893
 894        pkt_sys_power_control(pkt, enable);
 895
 896        ret = venus_iface_cmdq_write(hdev, pkt, false);
 897        if (ret)
 898                return ret;
 899
 900        return 0;
 901}
 902
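    /*
     * Number of u32 words currently pending in a queue (zero means empty);
     * used by the suspend path to verify nothing is outstanding.
     */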
 903static int venus_get_queue_size(struct venus_hfi_device *hdev,
 904                                unsigned int index)
 905{
 906        struct hfi_queue_header *qhdr;
 907
 908        if (index >= IFACEQ_NUM)
 909                return -EINVAL;
 910
 911        qhdr = hdev->queues[index].qhdr;
 912        if (!qhdr)
 913                return -EINVAL;
 914
 915        return abs(qhdr->read_idx - qhdr->write_idx);
 916}
 917
 918static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
 919{
 920        struct device *dev = hdev->core->dev;
 921        int ret;
 922
 923        ret = venus_sys_set_debug(hdev, venus_fw_debug);
 924        if (ret)
 925                dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
 926
 927        /*
 928         * The idle indicator is disabled by default on some 4xx firmware
 929         * versions; enable it explicitly so that suspend can work by checking
 930         * the WFI (wait-for-interrupt) bit.
 931         */
 932        if (IS_V4(hdev->core) || IS_V6(hdev->core))
 933                venus_sys_idle_indicator = true;
 934
 935        ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
 936        if (ret)
 937                dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
 938
 939        ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
 940        if (ret)
 941                dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
 942                         ret);
 943
 944        return ret;
 945}
 946
 947static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
 948{
 949        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
 950        struct hfi_session_pkt pkt;
 951
 952        pkt_session_cmd(&pkt, pkt_type, inst);
 953
 954        return venus_iface_cmdq_write(hdev, &pkt, sync);
 955}
 956
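    /*
     * Drain the firmware debug queue and print everything that is not a
     * coverage packet as a firmware debug message.
     */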
 957static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
 958{
 959        struct device *dev = hdev->core->dev;
 960        void *packet = hdev->dbg_buf;
 961
 962        while (!venus_iface_dbgq_read(hdev, packet)) {
 963                struct hfi_msg_sys_coverage_pkt *pkt = packet;
 964
 965                if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
 966                        struct hfi_msg_sys_debug_pkt *pkt = packet;
 967
 968                        dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
 969                }
 970        }
 971}
 972
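    /*
     * Send SYS_PC_PREP to the firmware and, when wait is set, block until
     * the matching response arrives (completed from the ISR thread) so the
     * caller can safely power the core down.
     */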
 973static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
 974                                        bool wait)
 975{
 976        unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
 977        struct hfi_sys_pc_prep_pkt pkt;
 978        int ret;
 979
 980        init_completion(&hdev->pwr_collapse_prep);
 981
 982        pkt_sys_pc_prep(&pkt);
 983
 984        ret = venus_iface_cmdq_write(hdev, &pkt, false);
 985        if (ret)
 986                return ret;
 987
 988        if (!wait)
 989                return 0;
 990
 991        ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
 992        if (!ret) {
 993                venus_flush_debug_queue(hdev);
 994                return -ETIMEDOUT;
 995        }
 996
 997        return 0;
 998}
 999
1000static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1001{
1002        int ret1, ret2;
1003
1004        ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1005        if (ret1 < 0)
1006                return ret1;
1007
1008        ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
1009        if (ret2 < 0)
1010                return ret2;
1011
1012        if (!ret1 && !ret2)
1013                return 1;
1014
1015        return 0;
1016}
1017
1018static void venus_sfr_print(struct venus_hfi_device *hdev)
1019{
1020        struct device *dev = hdev->core->dev;
1021        struct hfi_sfr *sfr = hdev->sfr.kva;
1022        void *p;
1023
1024        if (!sfr)
1025                return;
1026
1027        p = memchr(sfr->data, '\0', sfr->buf_size);
1028        /*
1029         * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1030         * that Venus is in the process of crashing.
1031         */
1032        if (!p)
1033                sfr->data[sfr->buf_size - 1] = '\0';
1034
1035        dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
1036}
1037
1038static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1039                                        void *packet)
1040{
1041        struct hfi_msg_event_notify_pkt *event_pkt = packet;
1042
1043        if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1044                return;
1045
1046        venus_set_state(hdev, VENUS_STATE_DEINIT);
1047
1048        venus_sfr_print(hdev);
1049}
1050
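    /*
     * Threaded half of the Venus interrupt: drain the message queue,
     * handle system-level responses (SYS_INIT resource setup, release
     * resource and PC_PREP completions, SYS_ERROR) and finally flush the
     * debug queue.
     */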
1051static irqreturn_t venus_isr_thread(struct venus_core *core)
1052{
1053        struct venus_hfi_device *hdev = to_hfi_priv(core);
1054        const struct venus_resources *res;
1055        void *pkt;
1056        u32 msg_ret;
1057
1058        if (!hdev)
1059                return IRQ_NONE;
1060
1061        res = hdev->core->res;
1062        pkt = hdev->pkt_buf;
1063
1065        while (!venus_iface_msgq_read(hdev, pkt)) {
1066                msg_ret = hfi_process_msg_packet(core, pkt);
1067                switch (msg_ret) {
1068                case HFI_MSG_EVENT_NOTIFY:
1069                        venus_process_msg_sys_error(hdev, pkt);
1070                        break;
1071                case HFI_MSG_SYS_INIT:
1072                        venus_hfi_core_set_resource(core, res->vmem_id,
1073                                                    res->vmem_size,
1074                                                    res->vmem_addr,
1075                                                    hdev);
1076                        break;
1077                case HFI_MSG_SYS_RELEASE_RESOURCE:
1078                        complete(&hdev->release_resource);
1079                        break;
1080                case HFI_MSG_SYS_PC_PREP:
1081                        complete(&hdev->pwr_collapse_prep);
1082                        break;
1083                default:
1084                        break;
1085                }
1086        }
1087
1088        venus_flush_debug_queue(hdev);
1089
1090        return IRQ_HANDLED;
1091}
1092
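    /*
     * Hard-IRQ half: latch WRAPPER_INTR_STATUS for the threaded handler,
     * ack the A2H soft interrupt and, on pre-V6 hardware, clear the
     * wrapper interrupt status.
     */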
1093static irqreturn_t venus_isr(struct venus_core *core)
1094{
1095        struct venus_hfi_device *hdev = to_hfi_priv(core);
1096        u32 status;
1097        void __iomem *cpu_cs_base;
1098        void __iomem *wrapper_base;
1099
1100        if (!hdev)
1101                return IRQ_NONE;
1102
1103        cpu_cs_base = hdev->core->cpu_cs_base;
1104        wrapper_base = hdev->core->wrapper_base;
1105
1106        status = readl(wrapper_base + WRAPPER_INTR_STATUS);
1107        if (IS_V6(core)) {
1108                if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1109                    status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
1110                    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1111                        hdev->irq_status = status;
1112        } else {
1113                if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1114                    status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1115                    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1116                        hdev->irq_status = status;
1117        }
1118        writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
1119        if (!IS_V6(core))
1120                writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1121
1122        return IRQ_WAKE_THREAD;
1123}
1124
1125static int venus_core_init(struct venus_core *core)
1126{
1127        struct venus_hfi_device *hdev = to_hfi_priv(core);
1128        struct device *dev = core->dev;
1129        struct hfi_sys_get_property_pkt version_pkt;
1130        struct hfi_sys_init_pkt pkt;
1131        int ret;
1132
1133        pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1134
1135        venus_set_state(hdev, VENUS_STATE_INIT);
1136
1137        ret = venus_iface_cmdq_write(hdev, &pkt, false);
1138        if (ret)
1139                return ret;
1140
1141        pkt_sys_image_version(&version_pkt);
1142
1143        ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1144        if (ret)
1145                dev_warn(dev, "failed to send image version pkt to fw\n");
1146
1147        ret = venus_sys_set_default_properties(hdev);
1148        if (ret)
1149                return ret;
1150
1151        return 0;
1152}
1153
1154static int venus_core_deinit(struct venus_core *core)
1155{
1156        struct venus_hfi_device *hdev = to_hfi_priv(core);
1157
1158        venus_set_state(hdev, VENUS_STATE_DEINIT);
1159        hdev->suspended = true;
1160        hdev->power_enabled = false;
1161
1162        return 0;
1163}
1164
1165static int venus_core_ping(struct venus_core *core, u32 cookie)
1166{
1167        struct venus_hfi_device *hdev = to_hfi_priv(core);
1168        struct hfi_sys_ping_pkt pkt;
1169
1170        pkt_sys_ping(&pkt, cookie);
1171
1172        return venus_iface_cmdq_write(hdev, &pkt, false);
1173}
1174
1175static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1176{
1177        struct venus_hfi_device *hdev = to_hfi_priv(core);
1178        struct hfi_sys_test_ssr_pkt pkt;
1179        int ret;
1180
1181        ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1182        if (ret)
1183                return ret;
1184
1185        return venus_iface_cmdq_write(hdev, &pkt, false);
1186}
1187
1188static int venus_session_init(struct venus_inst *inst, u32 session_type,
1189                              u32 codec)
1190{
1191        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1192        struct hfi_session_init_pkt pkt;
1193        int ret;
1194
1195        ret = venus_sys_set_debug(hdev, venus_fw_debug);
1196        if (ret)
1197                goto err;
1198
1199        ret = pkt_session_init(&pkt, inst, session_type, codec);
1200        if (ret)
1201                goto err;
1202
1203        ret = venus_iface_cmdq_write(hdev, &pkt, true);
1204        if (ret)
1205                goto err;
1206
1207        return 0;
1208
1209err:
1210        venus_flush_debug_queue(hdev);
1211        return ret;
1212}
1213
1214static int venus_session_end(struct venus_inst *inst)
1215{
1216        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1217        struct device *dev = hdev->core->dev;
1218
1219        if (venus_fw_coverage) {
1220                if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1221                        dev_warn(dev, "fw coverage msg ON failed\n");
1222        }
1223
1224        return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
1225}
1226
1227static int venus_session_abort(struct venus_inst *inst)
1228{
1229        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1230
1231        venus_flush_debug_queue(hdev);
1232
1233        return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
1234}
1235
1236static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1237{
1238        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1239        struct hfi_session_flush_pkt pkt;
1240        int ret;
1241
1242        ret = pkt_session_flush(&pkt, inst, flush_mode);
1243        if (ret)
1244                return ret;
1245
1246        return venus_iface_cmdq_write(hdev, &pkt, true);
1247}
1248
1249static int venus_session_start(struct venus_inst *inst)
1250{
1251        return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
1252}
1253
1254static int venus_session_stop(struct venus_inst *inst)
1255{
1256        return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
1257}
1258
1259static int venus_session_continue(struct venus_inst *inst)
1260{
1261        return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
1262}
1263
1264static int venus_session_etb(struct venus_inst *inst,
1265                             struct hfi_frame_data *in_frame)
1266{
1267        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1268        u32 session_type = inst->session_type;
1269        int ret;
1270
1271        if (session_type == VIDC_SESSION_TYPE_DEC) {
1272                struct hfi_session_empty_buffer_compressed_pkt pkt;
1273
1274                ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1275                if (ret)
1276                        return ret;
1277
1278                ret = venus_iface_cmdq_write(hdev, &pkt, false);
1279        } else if (session_type == VIDC_SESSION_TYPE_ENC) {
1280                struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1281
1282                ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1283                if (ret)
1284                        return ret;
1285
1286                ret = venus_iface_cmdq_write(hdev, &pkt, false);
1287        } else {
1288                ret = -EINVAL;
1289        }
1290
1291        return ret;
1292}
1293
1294static int venus_session_ftb(struct venus_inst *inst,
1295                             struct hfi_frame_data *out_frame)
1296{
1297        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1298        struct hfi_session_fill_buffer_pkt pkt;
1299        int ret;
1300
1301        ret = pkt_session_ftb(&pkt, inst, out_frame);
1302        if (ret)
1303                return ret;
1304
1305        return venus_iface_cmdq_write(hdev, &pkt, false);
1306}
1307
1308static int venus_session_set_buffers(struct venus_inst *inst,
1309                                     struct hfi_buffer_desc *bd)
1310{
1311        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1312        struct hfi_session_set_buffers_pkt *pkt;
1313        u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1314        int ret;
1315
1316        if (bd->buffer_type == HFI_BUFFER_INPUT)
1317                return 0;
1318
1319        pkt = (struct hfi_session_set_buffers_pkt *)packet;
1320
1321        ret = pkt_session_set_buffers(pkt, inst, bd);
1322        if (ret)
1323                return ret;
1324
1325        return venus_iface_cmdq_write(hdev, pkt, false);
1326}
1327
1328static int venus_session_unset_buffers(struct venus_inst *inst,
1329                                       struct hfi_buffer_desc *bd)
1330{
1331        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1332        struct hfi_session_release_buffer_pkt *pkt;
1333        u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1334        int ret;
1335
1336        if (bd->buffer_type == HFI_BUFFER_INPUT)
1337                return 0;
1338
1339        pkt = (struct hfi_session_release_buffer_pkt *)packet;
1340
1341        ret = pkt_session_unset_buffers(pkt, inst, bd);
1342        if (ret)
1343                return ret;
1344
1345        return venus_iface_cmdq_write(hdev, pkt, true);
1346}
1347
1348static int venus_session_load_res(struct venus_inst *inst)
1349{
1350        return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
1351}
1352
1353static int venus_session_release_res(struct venus_inst *inst)
1354{
1355        return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
1356}
1357
1358static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1359                                       u32 seq_hdr_len)
1360{
1361        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1362        struct hfi_session_parse_sequence_header_pkt *pkt;
1363        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1364        int ret;
1365
1366        pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1367
1368        ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1369        if (ret)
1370                return ret;
1371
1372        ret = venus_iface_cmdq_write(hdev, pkt, false);
1373        if (ret)
1374                return ret;
1375
1376        return 0;
1377}
1378
1379static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1380                                     u32 seq_hdr_len)
1381{
1382        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1383        struct hfi_session_get_sequence_header_pkt *pkt;
1384        u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1385        int ret;
1386
1387        pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1388
1389        ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1390        if (ret)
1391                return ret;
1392
1393        return venus_iface_cmdq_write(hdev, pkt, false);
1394}
1395
1396static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1397                                      void *pdata)
1398{
1399        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1400        struct hfi_session_set_property_pkt *pkt;
1401        u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1402        int ret;
1403
1404        pkt = (struct hfi_session_set_property_pkt *)packet;
1405
1406        ret = pkt_session_set_property(pkt, inst, ptype, pdata);
1407        if (ret == -ENOTSUPP)
1408                return 0;
1409        if (ret)
1410                return ret;
1411
1412        return venus_iface_cmdq_write(hdev, pkt, false);
1413}
1414
1415static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1416{
1417        struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1418        struct hfi_session_get_property_pkt pkt;
1419        int ret;
1420
1421        ret = pkt_session_get_property(&pkt, inst, ptype);
1422        if (ret)
1423                return ret;
1424
1425        return venus_iface_cmdq_write(hdev, &pkt, true);
1426}
1427
1428static int venus_resume(struct venus_core *core)
1429{
1430        struct venus_hfi_device *hdev = to_hfi_priv(core);
1431        int ret = 0;
1432
1433        mutex_lock(&hdev->lock);
1434
1435        if (!hdev->suspended)
1436                goto unlock;
1437
1438        ret = venus_power_on(hdev);
1439
1440unlock:
1441        if (!ret)
1442                hdev->suspended = false;
1443
1444        mutex_unlock(&hdev->lock);
1445
1446        return ret;
1447}
1448
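    /*
     * Suspend path for pre-3xx Venus: issue PC_PREP and wait for the
     * response, then power off only if the command and message queues are
     * empty, PC_PREP was the last packet sent and the firmware reports
     * PC_READY.
     */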
1449static int venus_suspend_1xx(struct venus_core *core)
1450{
1451        struct venus_hfi_device *hdev = to_hfi_priv(core);
1452        struct device *dev = core->dev;
1453        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1454        u32 ctrl_status;
1455        int ret;
1456
1457        if (!hdev->power_enabled || hdev->suspended)
1458                return 0;
1459
1460        mutex_lock(&hdev->lock);
1461        ret = venus_is_valid_state(hdev);
1462        mutex_unlock(&hdev->lock);
1463
1464        if (!ret) {
1465                dev_err(dev, "bad state, cannot suspend\n");
1466                return -EINVAL;
1467        }
1468
1469        ret = venus_prepare_power_collapse(hdev, true);
1470        if (ret) {
1471                dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1472                return ret;
1473        }
1474
1475        mutex_lock(&hdev->lock);
1476
1477        if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
1478                mutex_unlock(&hdev->lock);
1479                return -EINVAL;
1480        }
1481
1482        ret = venus_are_queues_empty(hdev);
1483        if (ret < 0 || !ret) {
1484                mutex_unlock(&hdev->lock);
1485                return -EINVAL;
1486        }
1487
1488        ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1489        if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
1490                mutex_unlock(&hdev->lock);
1491                return -EINVAL;
1492        }
1493
1494        ret = venus_power_off(hdev);
1495        if (ret) {
1496                mutex_unlock(&hdev->lock);
1497                return ret;
1498        }
1499
1500        hdev->suspended = true;
1501
1502        mutex_unlock(&hdev->lock);
1503
1504        return 0;
1505}
1506
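    /*
     * Poll predicates used with readx_poll_timeout() in venus_suspend_3xx():
     * both check the ARM9 WFI bit, combined with either the controller idle
     * bit or the PC_READY bit.
     */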
1507static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1508{
1509        void __iomem *wrapper_base = hdev->core->wrapper_base;
1510        void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1511        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1512        u32 ctrl_status, cpu_status;
1513
1514        if (IS_V6(hdev->core))
1515                cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1516        else
1517                cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1518        ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1519
1520        if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1521            ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1522                return true;
1523
1524        return false;
1525}
1526
1527static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1528{
1529        void __iomem *wrapper_base = hdev->core->wrapper_base;
1530        void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1531        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1532        u32 ctrl_status, cpu_status;
1533
1534        if (IS_V6(hdev->core))
1535                cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1536        else
1537                cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1538        ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1539
1540        if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1541            ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1542                return true;
1543
1544        return false;
1545}
1546
1547static int venus_suspend_3xx(struct venus_core *core)
1548{
1549        struct venus_hfi_device *hdev = to_hfi_priv(core);
1550        struct device *dev = core->dev;
1551        void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1552        u32 ctrl_status;
1553        bool val;
1554        int ret;
1555
1556        if (!hdev->power_enabled || hdev->suspended)
1557                return 0;
1558
1559        mutex_lock(&hdev->lock);
1560        ret = venus_is_valid_state(hdev);
1561        mutex_unlock(&hdev->lock);
1562
1563        if (!ret) {
1564                dev_err(dev, "bad state, cannot suspend\n");
1565                return -EINVAL;
1566        }
1567
1568        ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1569        if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1570                goto power_off;
1571
1572        /*
1573         * Power collapse sequence for Venus 3xx and 4xx versions:
1574         * 1. Check for ARM9 and video core to be idle by checking WFI bit
1575         *    (bit 0) in CPU status register and by checking Idle (bit 30) in
1576         *    Control status register for video core.
1577         * 2. Send a command to prepare for power collapse.
1578         * 3. Check for WFI and PC_READY bits.
1579         */
1580        ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1581                                 1500, 100 * 1500);
1582        if (ret)
1583                return ret;
1584
1585        ret = venus_prepare_power_collapse(hdev, false);
1586        if (ret) {
1587                dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1588                return ret;
1589        }
1590
1591        ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
1592                                 1500, 100 * 1500);
1593        if (ret)
1594                return ret;
1595
1596power_off:
1597        mutex_lock(&hdev->lock);
1598
1599        ret = venus_power_off(hdev);
1600        if (ret) {
1601                dev_err(dev, "venus_power_off (%d)\n", ret);
1602                mutex_unlock(&hdev->lock);
1603                return ret;
1604        }
1605
1606        hdev->suspended = true;
1607
1608        mutex_unlock(&hdev->lock);
1609
1610        return 0;
1611}
1612
1613static int venus_suspend(struct venus_core *core)
1614{
1615        if (IS_V3(core) || IS_V4(core) || IS_V6(core))
1616                return venus_suspend_3xx(core);
1617
1618        return venus_suspend_1xx(core);
1619}
1620
1621static const struct hfi_ops venus_hfi_ops = {
1622        .core_init                      = venus_core_init,
1623        .core_deinit                    = venus_core_deinit,
1624        .core_ping                      = venus_core_ping,
1625        .core_trigger_ssr               = venus_core_trigger_ssr,
1626
1627        .session_init                   = venus_session_init,
1628        .session_end                    = venus_session_end,
1629        .session_abort                  = venus_session_abort,
1630        .session_flush                  = venus_session_flush,
1631        .session_start                  = venus_session_start,
1632        .session_stop                   = venus_session_stop,
1633        .session_continue               = venus_session_continue,
1634        .session_etb                    = venus_session_etb,
1635        .session_ftb                    = venus_session_ftb,
1636        .session_set_buffers            = venus_session_set_buffers,
1637        .session_unset_buffers          = venus_session_unset_buffers,
1638        .session_load_res               = venus_session_load_res,
1639        .session_release_res            = venus_session_release_res,
1640        .session_parse_seq_hdr          = venus_session_parse_seq_hdr,
1641        .session_get_seq_hdr            = venus_session_get_seq_hdr,
1642        .session_set_property           = venus_session_set_property,
1643        .session_get_property           = venus_session_get_property,
1644
1645        .resume                         = venus_resume,
1646        .suspend                        = venus_suspend,
1647
1648        .isr                            = venus_isr,
1649        .isr_thread                     = venus_isr_thread,
1650};
1651
1652void venus_hfi_destroy(struct venus_core *core)
1653{
1654        struct venus_hfi_device *hdev = to_hfi_priv(core);
1655
1656        core->priv = NULL;
1657        venus_interface_queues_release(hdev);
1658        mutex_destroy(&hdev->lock);
1659        kfree(hdev);
1660        core->ops = NULL;
1661}
1662
1663int venus_hfi_create(struct venus_core *core)
1664{
1665        struct venus_hfi_device *hdev;
1666        int ret;
1667
1668        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1669        if (!hdev)
1670                return -ENOMEM;
1671
1672        mutex_init(&hdev->lock);
1673
1674        hdev->core = core;
1675        hdev->suspended = true;
1676        core->priv = hdev;
1677        core->ops = &venus_hfi_ops;
1678
1679        ret = venus_interface_queues_init(hdev);
1680        if (ret)
1681                goto err_kfree;
1682
1683        return 0;
1684
1685err_kfree:
1686        kfree(hdev);
1687        core->priv = NULL;
1688        core->ops = NULL;
1689        return ret;
1690}
1691
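    /*
     * Re-initialize the queue headers, table header and SFR inside the
     * already allocated shared memory, e.g. after a firmware restart; only
     * the bookkeeping is reset, the buffers themselves are kept.
     */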
1692void venus_hfi_queues_reinit(struct venus_core *core)
1693{
1694        struct venus_hfi_device *hdev = to_hfi_priv(core);
1695        struct hfi_queue_table_header *tbl_hdr;
1696        struct iface_queue *queue;
1697        struct hfi_sfr *sfr;
1698        unsigned int i;
1699
1700        mutex_lock(&hdev->lock);
1701
1702        for (i = 0; i < IFACEQ_NUM; i++) {
1703                queue = &hdev->queues[i];
1704                queue->qhdr =
1705                        IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
1706
1707                venus_set_qhdr_defaults(queue->qhdr);
1708
1709                queue->qhdr->start_addr = queue->qmem.da;
1710
1711                if (i == IFACEQ_CMD_IDX)
1712                        queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
1713                else if (i == IFACEQ_MSG_IDX)
1714                        queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
1715                else if (i == IFACEQ_DBG_IDX)
1716                        queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
1717        }
1718
1719        tbl_hdr = hdev->ifaceq_table.kva;
1720        tbl_hdr->version = 0;
1721        tbl_hdr->size = IFACEQ_TABLE_SIZE;
1722        tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
1723        tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
1724        tbl_hdr->num_q = IFACEQ_NUM;
1725        tbl_hdr->num_active_q = IFACEQ_NUM;
1726
1727        /*
1728         * Set the receive request to zero on the debug queue as there is no
1729         * need for an interrupt from the video hardware for debug messages
1730         */
1731        queue = &hdev->queues[IFACEQ_DBG_IDX];
1732        queue->qhdr->rx_req = 0;
1733
1734        sfr = hdev->sfr.kva;
1735        sfr->buf_size = ALIGNED_SFR_SIZE;
1736
1737        /* ensure table and queue header structs are settled in memory */
1738        wmb();
1739
1740        mutex_unlock(&hdev->lock);
1741}
1742