/* linux/drivers/misc/habanalabs/goya/goya.c */
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/*
   4 * Copyright 2016-2019 HabanaLabs, Ltd.
   5 * All Rights Reserved.
   6 */
   7
   8#include "goyaP.h"
   9#include "include/hw_ip/mmu/mmu_general.h"
  10#include "include/hw_ip/mmu/mmu_v1_0.h"
  11#include "include/goya/asic_reg/goya_masks.h"
  12#include "include/goya/goya_reg_map.h"
  13
  14#include <linux/pci.h>
  15#include <linux/genalloc.h>
  16#include <linux/hwmon.h>
  17#include <linux/io-64-nonatomic-lo-hi.h>
  18#include <linux/iommu.h>
  19#include <linux/seq_file.h>
  20
  21/*
  22 * GOYA security scheme:
  23 *
  24 * 1. Host is protected by:
  25 *        - Range registers (When MMU is enabled, DMA RR does NOT protect host)
  26 *        - MMU
  27 *
  28 * 2. DRAM is protected by:
  29 *        - Range registers (protect the first 512MB)
  30 *        - MMU (isolation between users)
  31 *
  32 * 3. Configuration is protected by:
  33 *        - Range registers
  34 *        - Protection bits
  35 *
  36 * When MMU is disabled:
  37 *
  38 * QMAN DMA: PQ, CQ, CP, DMA are secured.
  39 * PQ, CB and the data are on the host.
  40 *
  41 * QMAN TPC/MME:
  42 * PQ, CQ and CP are not secured.
  43 * PQ, CB and the data are on the SRAM/DRAM.
  44 *
  45 * Since QMAN DMA is secured, the driver is parsing the DMA CB:
  46 *     - checks DMA pointer
  47 *     - WREG, MSG_PROT are not allowed.
  48 *     - MSG_LONG/SHORT are allowed.
  49 *
  50 * A read/write transaction by the QMAN to a protected area will succeed if
  51 * and only if the QMAN's CP is secured and MSG_PROT is used
  52 *
  53 *
  54 * When MMU is enabled:
  55 *
  56 * QMAN DMA: PQ, CQ and CP are secured.
  57 * MMU is set to bypass on the Secure props register of the QMAN.
  58 * The reasons we don't enable MMU for PQ, CQ and CP are:
  59 *     - PQ entry is in kernel address space and the driver doesn't map it.
  60 *     - CP writes to MSIX register and to kernel address space (completion
  61 *       queue).
  62 *
  63 * DMA is not secured but because CP is secured, the driver still needs to parse
  64 * the CB, but doesn't need to check the DMA addresses.
  65 *
  66 * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
  67 * the driver doesn't map memory in MMU.
  68 *
  69 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
  70 *
  71 * DMA RR does NOT protect host because DMA is not secured
  72 *
  73 */
  74
/* Number of entries in the goya_mmu_regs[] table below */
#define GOYA_MMU_REGS_NUM               63

#define GOYA_DMA_POOL_BLK_SIZE          0x100           /* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC         500             /* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC    20000           /* 20s */
#define GOYA_RESET_WAIT_MSEC            1               /* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC        100             /* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC       1000            /* 1s */
#define GOYA_TEST_QUEUE_WAIT_USEC       100000          /* 100ms */
/* Palladium (pre-silicon emulation) runs far slower than real silicon,
 * hence the inflated timeouts below
 */
#define GOYA_PLDM_MMU_TIMEOUT_USEC      (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC    (HL_DEVICE_TIMEOUT_USEC * 30)

/* Arbitrary signature value written/polled when testing QMAN0 fences */
#define GOYA_QMAN0_FENCE_VAL            0xD169B243

#define GOYA_MAX_STRING_LEN             20

#define GOYA_CB_POOL_CB_CNT             512
#define GOYA_CB_POOL_CB_SIZE            0x20000         /* 128KB */

/* A QMAN is idle when all idle bits in its GLBL_STS0 register are set */
#define IS_QM_IDLE(engine, qm_glbl_sts0) \
	(((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
#define IS_DMA_QM_IDLE(qm_glbl_sts0)    IS_QM_IDLE(DMA, qm_glbl_sts0)
#define IS_TPC_QM_IDLE(qm_glbl_sts0)    IS_QM_IDLE(TPC, qm_glbl_sts0)
#define IS_MME_QM_IDLE(qm_glbl_sts0)    IS_QM_IDLE(MME, qm_glbl_sts0)

/* Same scheme for the command queues of the compute engines */
#define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
	(((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
			engine##_CMDQ_IDLE_MASK)
#define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
	IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
#define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
	IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)

/* DMA channel is idle when its BUSY bit is clear */
#define IS_DMA_IDLE(dma_core_sts0) \
	!((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)

#define IS_TPC_IDLE(tpc_cfg_sts) \
	(((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)

#define IS_MME_IDLE(mme_arch_sts) \
	(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)


/* Human-readable names for the MSI-X vectors: one per completion queue
 * plus one for the CPU event queue
 */
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};

/* Size in bytes of each QMAN packet type, indexed by packet ID.
 * Used when parsing user command buffers.
 */
static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]        = sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]      = sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]       = sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]      = sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]         = sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]       = sizeof(struct packet_msg_prot),
	[PACKET_FENCE]          = sizeof(struct packet_fence),
	[PACKET_LIN_DMA]        = sizeof(struct packet_lin_dma),
	[PACKET_NOP]            = sizeof(struct packet_nop),
	[PACKET_STOP]           = sizeof(struct packet_stop)
};

/* Registers whose low bits carry per-context MMU configuration (ASID /
 * MMU-bypass); programmed for every engine in goya_mmu_prepare().
 * Must contain exactly GOYA_MMU_REGS_NUM entries.
 */
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA,
	mmPCIE_WRAP_PSOC_ARUSER,
	mmPCIE_WRAP_PSOC_AWUSER
};

/* Async event IDs the driver cares about (ECC, decode errors, QMAN/DMA
 * errors, thermal/reset GPIOs, bus monitors, ...)
 */
static u32 goya_all_events[] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};
 326
 327static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
 328static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
 329static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
 330static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
 331
 332void goya_get_fixed_properties(struct hl_device *hdev)
 333{
 334        struct asic_fixed_properties *prop = &hdev->asic_prop;
 335        int i;
 336
 337        for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
 338                prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
 339                prop->hw_queues_props[i].driver_only = 0;
 340        }
 341
 342        for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
 343                prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
 344                prop->hw_queues_props[i].driver_only = 1;
 345        }
 346
 347        for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
 348                        NUMBER_OF_INT_HW_QUEUES; i++) {
 349                prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
 350                prop->hw_queues_props[i].driver_only = 0;
 351        }
 352
 353        for (; i < HL_MAX_QUEUES; i++)
 354                prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
 355
 356        prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
 357
 358        prop->dram_base_address = DRAM_PHYS_BASE;
 359        prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
 360        prop->dram_end_address = prop->dram_base_address + prop->dram_size;
 361        prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
 362
 363        prop->sram_base_address = SRAM_BASE_ADDR;
 364        prop->sram_size = SRAM_SIZE;
 365        prop->sram_end_address = prop->sram_base_address + prop->sram_size;
 366        prop->sram_user_base_address = prop->sram_base_address +
 367                                                SRAM_USER_BASE_OFFSET;
 368
 369        prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
 370        prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
 371        if (hdev->pldm)
 372                prop->mmu_pgt_size = 0x800000; /* 8MB */
 373        else
 374                prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
 375        prop->mmu_pte_size = HL_PTE_SIZE;
 376        prop->mmu_hop_table_size = HOP_TABLE_SIZE;
 377        prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
 378        prop->dram_page_size = PAGE_SIZE_2MB;
 379
 380        prop->va_space_host_start_address = VA_HOST_SPACE_START;
 381        prop->va_space_host_end_address = VA_HOST_SPACE_END;
 382        prop->va_space_dram_start_address = VA_DDR_SPACE_START;
 383        prop->va_space_dram_end_address = VA_DDR_SPACE_END;
 384        prop->dram_size_for_default_page_mapping =
 385                        prop->va_space_dram_end_address;
 386        prop->cfg_size = CFG_SIZE;
 387        prop->max_asid = MAX_ASID;
 388        prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
 389        prop->high_pll = PLL_HIGH_DEFAULT;
 390        prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
 391        prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
 392        prop->max_power_default = MAX_POWER_DEFAULT;
 393        prop->tpc_enabled_mask = TPC_ENABLED_MASK;
 394        prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
 395        prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
 396}
 397
 398/*
 399 * goya_pci_bars_map - Map PCI BARS of Goya device
 400 *
 401 * @hdev: pointer to hl_device structure
 402 *
 403 * Request PCI regions and map them to kernel virtual addresses.
 404 * Returns 0 on success
 405 *
 406 */
 407static int goya_pci_bars_map(struct hl_device *hdev)
 408{
 409        static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
 410        bool is_wc[3] = {false, false, true};
 411        int rc;
 412
 413        rc = hl_pci_bars_map(hdev, name, is_wc);
 414        if (rc)
 415                return rc;
 416
 417        hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
 418                        (CFG_BASE - SRAM_BASE_ADDR);
 419
 420        return 0;
 421}
 422
 423static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
 424{
 425        struct goya_device *goya = hdev->asic_specific;
 426        u64 old_addr = addr;
 427        int rc;
 428
 429        if ((goya) && (goya->ddr_bar_cur_addr == addr))
 430                return old_addr;
 431
 432        /* Inbound Region 1 - Bar 4 - Point to DDR */
 433        rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
 434        if (rc)
 435                return U64_MAX;
 436
 437        if (goya) {
 438                old_addr = goya->ddr_bar_cur_addr;
 439                goya->ddr_bar_cur_addr = addr;
 440        }
 441
 442        return old_addr;
 443}
 444
 445/*
 446 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 447 *
 448 * @hdev: pointer to hl_device structure
 449 *
 450 * This is needed in case the firmware doesn't initialize the iATU
 451 *
 452 */
static int goya_init_iatu(struct hl_device *hdev)
{
	/* Delegate to the common PCI code with Goya's fixed SRAM/DRAM
	 * bases and the host physical window
	 */
	return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
				HOST_PHYS_BASE, HOST_PHYS_SIZE);
}
 458
 459/*
 460 * goya_early_init - GOYA early initialization code
 461 *
 462 * @hdev: pointer to hl_device structure
 463 *
 464 * Verify PCI bars
 465 * Set DMA masks
 466 * PCI controller initialization
 467 * Map PCI bars
 468 *
 469 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes; a mismatch means this is not a Goya device */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	/* DDR BAR size varies with BIOS configuration - record what we got */
	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* Enable the device, set 48-bit DMA mask, init iATU, map BARs */
	rc = hl_pci_init(hdev, 48);
	if (rc)
		return rc;

	/* Sanity-check the boot strap pins on real HW only (register is
	 * not meaningful on Palladium)
	 */
	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;
}
 515
 516/*
 517 * goya_early_fini - GOYA early finalization code
 518 *
 519 * @hdev: pointer to hl_device structure
 520 *
 521 * Unmap PCI bars
 522 *
 523 */
static int goya_early_fini(struct hl_device *hdev)
{
	/* Undo hl_pci_init(): unmap BARs and disable the PCI device */
	hl_pci_fini(hdev);

	return 0;
}
 530
/* Program one engine register with the context's ASID.
 * The low 11 bits of these registers hold the MMU-bypass and ASID
 * fields; clear them first, then OR in the new ASID (bypass stays 0,
 * i.e. the MMU is used).
 */
static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
	/* mask to zero the MMBP and ASID bits */
	WREG32_AND(reg, ~0x7FF);
	WREG32_OR(reg, asid);
}
 537
 538static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
 539{
 540        struct goya_device *goya = hdev->asic_specific;
 541
 542        if (!(goya->hw_cap_initialized & HW_CAP_MMU))
 543                return;
 544
 545        if (secure)
 546                WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
 547        else
 548                WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
 549
 550        RREG32(mmDMA_QM_0_GLBL_PROT);
 551}
 552
 553/*
 554 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 555 *
 556 * @hdev: pointer to hl_device structure
 557 *
 558 */
 559static void goya_fetch_psoc_frequency(struct hl_device *hdev)
 560{
 561        struct asic_fixed_properties *prop = &hdev->asic_prop;
 562
 563        prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
 564        prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
 565        prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
 566        prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
 567}
 568
/*
 * goya_late_init - Late initialization, after queues and MMU are up.
 *
 * @hdev: pointer to hl_device structure
 *
 * Order matters here: page tables are cleared and the default DRAM page
 * mapped before the CPU queues (which rely on the device-CPU mappings)
 * are initialized and tested.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to clear MMU page tables range %d\n", rc);
		return rc;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
		return rc;
	}

	rc = goya_mmu_add_mappings_for_device_cpu(hdev);
	if (rc)
		return rc;

	rc = goya_init_cpu_queues(hdev);
	if (rc)
		return rc;

	rc = goya_test_cpu_queue(hdev);
	if (rc)
		return rc;

	rc = goya_armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info %d\n", rc);
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI access from CPU %d\n", rc);
		return rc;
	}

	/* Tell the device CPU where to trigger its interrupt events */
	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	return 0;
}
 625
 626/*
 627 * goya_late_fini - GOYA late tear-down code
 628 *
 629 * @hdev: pointer to hl_device structure
 630 *
 631 * Free sensors allocated structures
 632 */
 633void goya_late_fini(struct hl_device *hdev)
 634{
 635        const struct hwmon_channel_info **channel_info_arr;
 636        int i = 0;
 637
 638        if (!hdev->hl_chip_info->info)
 639                return;
 640
 641        channel_info_arr = hdev->hl_chip_info->info;
 642
 643        while (channel_info_arr[i]) {
 644                kfree(channel_info_arr[i]->config);
 645                kfree(channel_info_arr[i]);
 646                i++;
 647        }
 648
 649        kfree(channel_info_arr);
 650
 651        hdev->hl_chip_info->info = NULL;
 652}
 653
 654/*
 655 * goya_sw_init - Goya software initialization code
 656 *
 657 * @hdev: pointer to hl_device structure
 658 *
 659 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	/* All engine clocks start at the low (idle) frequency */
	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	/* Coherent region shared with the device CPU for F/W messaging */
	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					HL_CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
		&hdev->cpu_accessible_dma_address);

	/* Sub-allocator over that region, 32-byte minimum granularity */
	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_accessible_dma_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

	/* Error unwinding, in reverse order of acquisition */
free_cpu_accessible_dma_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}
 738
 739/*
 740 * goya_sw_fini - Goya software tear-down code
 741 *
 742 * @hdev: pointer to hl_device structure
 743 *
 744 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	/* Release everything acquired in goya_sw_init(), reverse order:
	 * sub-pool first, then the coherent region it was built on
	 */
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}
 762
/* Program one DMA QMAN: PQ base/size, message bases for monitors and
 * sync objects, GIC error reporting, and finally enable it.
 * @bus_address is the DMA address of the queue's PQ buffer.
 */
static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	/* Per-QMAN register stride, derived from two adjacent QMAN blocks */
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	/* On error, the QMAN raises the per-queue async event via the GIC */
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	/* With MMU up the QMAN is only partly trusted, per the security
	 * scheme described at the top of this file
	 */
	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
 810
/* Program one DMA channel: GIC error reporting and the sync-object
 * address to which the channel writes its completion value.
 */
static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	/* Per-channel register stride */
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	/* Channels 1..4 signal consecutive sync objects starting at
	 * SOB 1000; channel 0 uses the dedicated SOB 1007
	 */
	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}
 836
 837/*
 838 * goya_init_dma_qmans - Initialize QMAN DMA registers
 839 *
 840 * @hdev: pointer to hl_device structure
 841 *
 842 * Initialize the H/W registers of the QMAN DMA channels
 843 *
 844 */
 845void goya_init_dma_qmans(struct hl_device *hdev)
 846{
 847        struct goya_device *goya = hdev->asic_specific;
 848        struct hl_hw_queue *q;
 849        int i;
 850
 851        if (goya->hw_cap_initialized & HW_CAP_DMA)
 852                return;
 853
 854        q = &hdev->kernel_queues[0];
 855
 856        for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
 857                goya_init_dma_qman(hdev, i, q->bus_address);
 858                goya_init_dma_ch(hdev, i);
 859        }
 860
 861        goya->hw_cap_initialized |= HW_CAP_DMA;
 862}
 863
 864/*
 865 * goya_disable_external_queues - Disable external queues
 866 *
 867 * @hdev: pointer to hl_device structure
 868 *
 869 */
 870static void goya_disable_external_queues(struct hl_device *hdev)
 871{
 872        WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
 873        WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
 874        WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
 875        WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
 876        WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
 877}
 878
/*
 * goya_stop_queue - Gracefully stop a single QMAN
 *
 * @hdev: pointer to hl_device structure
 * @cfg_reg: the QMAN's GLBL_CFG1 register (CP stop request)
 * @cp_sts_reg: the QMAN's CP_STS register (fence-in-progress indication)
 * @glbl_sts0_reg: the QMAN's GLBL_STS0 register (CP-is-stopped indication)
 *
 * Requests the CP to stop, waits for any in-flight fence to complete and
 * then polls until the CP reports it has stopped.
 *
 * Returns 0 on success (or when stuck on a fence), -EINVAL on stop timeout.
 */
static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
                                u32 cp_sts_reg, u32 glbl_sts0_reg)
{
        int rc;
        u32 status;

        /* use the shift/mask values of TPC0 as they are all the same */

        /* Request the CP to stop */
        WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

        /* If the CP is currently waiting on a fence, give it time to get
         * out of the fence before checking the stop indication.
         */
        status = RREG32(cp_sts_reg);
        if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
                rc = hl_poll_timeout(
                        hdev,
                        cp_sts_reg,
                        status,
                        !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
                        1000,
                        QMAN_FENCE_TIMEOUT_USEC);

                /* if QMAN is stuck in fence no need to check for stop */
                if (rc)
                        return 0;
        }

        /* Wait for the CP to acknowledge the stop request */
        rc = hl_poll_timeout(
                hdev,
                glbl_sts0_reg,
                status,
                (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
                1000,
                QMAN_STOP_TIMEOUT_USEC);

        if (rc) {
                dev_err(hdev->dev,
                        "Timeout while waiting for QMAN to stop\n");
                return -EINVAL;
        }

        return 0;
}
 920
 921/*
 922 * goya_stop_external_queues - Stop external queues
 923 *
 924 * @hdev: pointer to hl_device structure
 925 *
 926 * Returns 0 on success
 927 *
 928 */
 929static int goya_stop_external_queues(struct hl_device *hdev)
 930{
 931        int rc, retval = 0;
 932
 933        rc = goya_stop_queue(hdev,
 934                        mmDMA_QM_0_GLBL_CFG1,
 935                        mmDMA_QM_0_CP_STS,
 936                        mmDMA_QM_0_GLBL_STS0);
 937
 938        if (rc) {
 939                dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
 940                retval = -EIO;
 941        }
 942
 943        rc = goya_stop_queue(hdev,
 944                        mmDMA_QM_1_GLBL_CFG1,
 945                        mmDMA_QM_1_CP_STS,
 946                        mmDMA_QM_1_GLBL_STS0);
 947
 948        if (rc) {
 949                dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
 950                retval = -EIO;
 951        }
 952
 953        rc = goya_stop_queue(hdev,
 954                        mmDMA_QM_2_GLBL_CFG1,
 955                        mmDMA_QM_2_CP_STS,
 956                        mmDMA_QM_2_GLBL_STS0);
 957
 958        if (rc) {
 959                dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
 960                retval = -EIO;
 961        }
 962
 963        rc = goya_stop_queue(hdev,
 964                        mmDMA_QM_3_GLBL_CFG1,
 965                        mmDMA_QM_3_CP_STS,
 966                        mmDMA_QM_3_GLBL_STS0);
 967
 968        if (rc) {
 969                dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
 970                retval = -EIO;
 971        }
 972
 973        rc = goya_stop_queue(hdev,
 974                        mmDMA_QM_4_GLBL_CFG1,
 975                        mmDMA_QM_4_CP_STS,
 976                        mmDMA_QM_4_GLBL_STS0);
 977
 978        if (rc) {
 979                dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
 980                retval = -EIO;
 981        }
 982
 983        return retval;
 984}
 985
 986/*
 987 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 988 *
 989 * @hdev: pointer to hl_device structure
 990 *
 991 * Returns 0 on success
 992 *
 993 */
 994int goya_init_cpu_queues(struct hl_device *hdev)
 995{
 996        struct goya_device *goya = hdev->asic_specific;
 997        struct hl_eq *eq;
 998        u32 status;
 999        struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1000        int err;
1001
1002        if (!hdev->cpu_queues_enable)
1003                return 0;
1004
1005        if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1006                return 0;
1007
1008        eq = &hdev->event_queue;
1009
1010        WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
1011        WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
1012
1013        WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
1014        WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
1015
1016        WREG32(mmCPU_CQ_BASE_ADDR_LOW,
1017                        lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1018        WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
1019                        upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1020
1021        WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
1022        WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
1023        WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
1024
1025        /* Used for EQ CI */
1026        WREG32(mmCPU_EQ_CI, 0);
1027
1028        WREG32(mmCPU_IF_PF_PQ_PI, 0);
1029
1030        WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
1031
1032        WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1033                        GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1034
1035        err = hl_poll_timeout(
1036                hdev,
1037                mmCPU_PQ_INIT_STATUS,
1038                status,
1039                (status == PQ_INIT_STATUS_READY_FOR_HOST),
1040                1000,
1041                GOYA_CPU_TIMEOUT_USEC);
1042
1043        if (err) {
1044                dev_err(hdev->dev,
1045                        "Failed to setup communication with device CPU\n");
1046                return -EIO;
1047        }
1048
1049        goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1050        return 0;
1051}
1052
/*
 * goya_set_pll_refclk - Clear the divider-select registers of all PLLs
 *
 * @hdev: pointer to hl_device structure
 *
 * Writes 0 to DIV_SEL_0..3 of every PLL (CPU, IC, MC, PSOC MME/PCI/EMMC,
 * TPC). Per the function name, value 0 presumably selects the reference
 * clock — confirm against the PLL specification.
 */
static void goya_set_pll_refclk(struct hl_device *hdev)
{
        WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
        WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
        WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
        WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

        WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
        WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
        WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
        WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

        WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
        WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
        WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
        WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

        WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
        WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
        WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
        WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

        WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
        WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
        WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
        WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

        WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
        WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
        WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
        WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

        WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
        WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
        WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
        WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}
1090
/*
 * goya_disable_clk_rlx - Disable clock relaxation on the MME and IC PLLs
 *
 * @hdev: pointer to hl_device structure
 *
 * NOTE(review): the magic value 0x100010 is not documented here; its
 * meaning should be confirmed against the PLL CLK_RLX register spec.
 */
static void goya_disable_clk_rlx(struct hl_device *hdev)
{
        WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
        WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}
1096
/*
 * _goya_tpc_mbist_workaround - Per-TPC workaround for bug H2 #2443
 *
 * @hdev: pointer to hl_device structure
 * @tpc_id: index of the TPC engine to fix up
 *
 * Runs the TPC MBIST sequence, resets the TPC core and zeroes its SLM,
 * because "TPC SB is not initialized on chip reset".
 */
static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
        u64 tpc_eml_address;
        u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
        int err, slm_index;

        /* Per-TPC strides: 0x40000 for CFG space, 0x200000 for EML space.
         * The SLM sits 0x100000 above the EML block.
         */
        tpc_offset = tpc_id * 0x40000;
        tpc_eml_offset = tpc_id * 0x200000;
        tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
        tpc_slm_offset = tpc_eml_address + 0x100000;

        /*
         * Workaround for Bug H2 #2443 :
         * "TPC SB is not initialized on chip reset"
         */

        val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
        if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
                dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
                        tpc_id);

        /* NOTE(review): the CNTRL readback (low 12 bits cleared) is written
         * into MBIST_PAT — confirm this is intentional per the H/W spec.
         */
        WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
        WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

        /* Kick off MBIST and wait for the DONE indication */
        WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
                1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

        err = hl_poll_timeout(
                hdev,
                mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
                val,
                (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
                1000,
                HL_DEVICE_TIMEOUT_USEC);

        if (err)
                dev_err(hdev->dev,
                        "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

        /* Pulse the TPC core reset: assert, wait, de-assert, wait */
        WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
                1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

        msleep(GOYA_RESET_WAIT_MSEC);

        WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
                ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

        msleep(GOYA_RESET_WAIT_MSEC);

        /* Zero-initialize the first 256 words of the SLM */
        for (slm_index = 0 ; slm_index < 256 ; slm_index++)
                WREG32(tpc_slm_offset + (slm_index << 2), 0);

        /* Dummy read to flush/serialize the writes above */
        val = RREG32(tpc_slm_offset);
}
1161
1162static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1163{
1164        struct goya_device *goya = hdev->asic_specific;
1165        int i;
1166
1167        if (hdev->pldm)
1168                return;
1169
1170        if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1171                return;
1172
1173        /* Workaround for H2 #2443 */
1174
1175        for (i = 0 ; i < TPC_MAX_NUM ; i++)
1176                _goya_tpc_mbist_workaround(hdev, i);
1177
1178        goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1179}
1180
1181/*
1182 * goya_init_golden_registers - Initialize golden registers
1183 *
1184 * @hdev: pointer to hl_device structure
1185 *
1186 * Initialize the H/W registers of the device
1187 *
1188 */
1189static void goya_init_golden_registers(struct hl_device *hdev)
1190{
1191        struct goya_device *goya = hdev->asic_specific;
1192        u32 polynom[10], tpc_intr_mask, offset;
1193        int i;
1194
1195        if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1196                return;
1197
1198        polynom[0] = 0x00020080;
1199        polynom[1] = 0x00401000;
1200        polynom[2] = 0x00200800;
1201        polynom[3] = 0x00002000;
1202        polynom[4] = 0x00080200;
1203        polynom[5] = 0x00040100;
1204        polynom[6] = 0x00100400;
1205        polynom[7] = 0x00004000;
1206        polynom[8] = 0x00010000;
1207        polynom[9] = 0x00008000;
1208
1209        /* Mask all arithmetic interrupts from TPC */
1210        tpc_intr_mask = 0x7FFF;
1211
1212        for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1213                WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1214                WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1215                WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1216                WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1217                WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1218
1219                WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1220                WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1221                WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1222                WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1223                WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1224
1225
1226                WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1227                WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1228                WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1229                WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1230                WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1231
1232                WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1233                WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1234                WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1235                WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1236                WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1237
1238                WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1239                WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1240                WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1241                WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1242                WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1243
1244                WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1245                WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1246                WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1247                WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1248                WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1249        }
1250
1251        WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1252        WREG32(mmMME_AGU, 0x0f0f0f10);
1253        WREG32(mmMME_SEI_MASK, ~0x0);
1254
1255        WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1256        WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1257        WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1258        WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1259        WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1260        WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1261        WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1262        WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1263        WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1264        WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1265        WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1266        WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1267        WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1268        WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1269        WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1270        WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1271        WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1272        WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1273        WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1274        WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1275        WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1276        WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1277        WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1278        WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1279        WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1280        WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1281        WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1282        WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1283        WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1284        WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1285        WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1286        WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1287        WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1288        WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1289        WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1290        WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1291        WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1292        WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1293        WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1294        WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1295        WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1296        WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1297        WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1298        WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1299        WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1300        WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1301        WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1302        WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1303        WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1304        WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1305        WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1306        WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1307        WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1308        WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1309        WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1310        WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1311        WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1312        WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1313        WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1314        WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1315        WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1316        WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1317        WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1318        WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1319        WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1320        WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1321        WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1322        WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1323        WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1324        WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1325        WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1326        WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1327        WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1328        WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1329        WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1330        WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1331        WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1332        WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1333        WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1334        WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1335        WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1336        WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1337        WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1338        WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1339
1340        WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1341        WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1342        WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1343        WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1344        WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1345        WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1346        WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1347        WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1348        WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1349        WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1350        WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1351        WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1352
1353        WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1354        WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1355        WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1356        WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1357        WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1358        WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1359        WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1360        WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1361        WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1362        WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1363        WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1364        WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1365
1366        WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1367        WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1368        WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1369        WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1370        WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1371        WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1372        WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1373        WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1374        WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1375        WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1376        WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1377        WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1378
1379        WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1380        WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1381        WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1382        WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1383        WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1384        WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1385        WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1386        WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1387        WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1388        WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1389        WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1390        WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1391
1392        WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1393        WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1394        WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1395        WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1396        WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1397        WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1398        WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1399        WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1400        WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1401        WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1402        WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1403        WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1404
1405        WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1406        WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1407        WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1408        WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1409        WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1410        WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1411        WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1412        WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1413        WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1414        WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1415        WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1416        WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1417
1418        for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1419                WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1420                WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1421                WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1422                WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1423                WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1424                WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1425
1426                WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1427                WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1428                WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1429                WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1430                WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1431                WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1432                WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1433                WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1434
1435                WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1436                WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1437        }
1438
1439        for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1440                WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1441                                1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1442                WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1443                                1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1444        }
1445
1446        for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1447                /*
1448                 * Workaround for Bug H2 #2441 :
1449                 * "ST.NOP set trace event illegal opcode"
1450                 */
1451                WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1452
1453                WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1454                                1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1455                WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1456                                1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1457        }
1458
1459        WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1460        WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1461                        1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1462
1463        WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1464        WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1465                        1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1466
1467        /*
1468         * Workaround for H2 #HW-23 bug
1469         * Set DMA max outstanding read requests to 240 on DMA CH 1.
1470         * This limitation is still large enough to not affect Gen4 bandwidth.
1471         * We need to only limit that DMA channel because the user can only read
1472         * from Host using DMA CH 1
1473         */
1474        WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1475
1476        WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1477
1478        goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1479}
1480
/*
 * goya_init_mme_qman - Initialize the MME QMAN H/W registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Points the QMAN's PQ at its SRAM-resident queue, sets the CP message
 * bases (monitors + sync objects), routes errors to the GIC and enables
 * the QMAN.
 */
static void goya_init_mme_qman(struct hl_device *hdev)
{
        u32 mtr_base_lo, mtr_base_hi;
        u32 so_base_lo, so_base_hi;
        u32 gic_base_lo, gic_base_hi;
        u64 qman_base_addr;

        mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
        mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

        gic_base_lo =
                lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
        gic_base_hi =
                upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

        /* The MME QMAN's PQ lives in SRAM at a fixed offset */
        qman_base_addr = hdev->asic_prop.sram_base_address +
                                MME_QMAN_BASE_OFFSET;

        WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
        WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
        WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
        WREG32(mmMME_QM_PQ_PI, 0);
        WREG32(mmMME_QM_PQ_CI, 0);
        /* Offsets the CP uses to reach the LDMA registers — assumed to
         * match the register file layout; confirm against the H/W spec.
         */
        WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
        WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
        WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
        WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);

        /* MSG base 0 -> sync manager monitors, base 1 -> sync objects */
        WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
        WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
        WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
        WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);

        /* QMAN CQ has 8 cache lines */
        WREG32(mmMME_QM_CQ_CFG1, 0x00080008);

        /* On error, write the MME_QM async event ID to the GIC */
        WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
        WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);

        WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);

        WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);

        WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);

        WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
}
1530
1531static void goya_init_mme_cmdq(struct hl_device *hdev)
1532{
1533        u32 mtr_base_lo, mtr_base_hi;
1534        u32 so_base_lo, so_base_hi;
1535        u32 gic_base_lo, gic_base_hi;
1536        u64 qman_base_addr;
1537
1538        mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1539        mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1540        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1541        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1542
1543        gic_base_lo =
1544                lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1545        gic_base_hi =
1546                upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1547
1548        qman_base_addr = hdev->asic_prop.sram_base_address +
1549                                MME_QMAN_BASE_OFFSET;
1550
1551        WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1552        WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1553        WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1554        WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1555
1556        /* CMDQ CQ has 20 cache lines */
1557        WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1558
1559        WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1560        WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1561
1562        WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1563
1564        WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1565
1566        WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1567
1568        WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1569}
1570
1571void goya_init_mme_qmans(struct hl_device *hdev)
1572{
1573        struct goya_device *goya = hdev->asic_specific;
1574        u32 so_base_lo, so_base_hi;
1575
1576        if (goya->hw_cap_initialized & HW_CAP_MME)
1577                return;
1578
1579        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1580        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1581
1582        WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1583        WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1584
1585        goya_init_mme_qman(hdev);
1586        goya_init_mme_cmdq(hdev);
1587
1588        goya->hw_cap_initialized |= HW_CAP_MME;
1589}
1590
1591static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1592{
1593        u32 mtr_base_lo, mtr_base_hi;
1594        u32 so_base_lo, so_base_hi;
1595        u32 gic_base_lo, gic_base_hi;
1596        u64 qman_base_addr;
1597        u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1598
1599        mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1600        mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1601        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1602        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1603
1604        gic_base_lo =
1605                lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1606        gic_base_hi =
1607                upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1608
1609        qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1610
1611        WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1612        WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1613        WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1614        WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1615        WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1616        WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1617        WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1618        WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1619        WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1620
1621        WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1622        WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1623        WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1624        WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1625
1626        WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1627
1628        WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1629        WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1630
1631        WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1632                        GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1633
1634        WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1635
1636        WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1637
1638        WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1639}
1640
1641static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1642{
1643        u32 mtr_base_lo, mtr_base_hi;
1644        u32 so_base_lo, so_base_hi;
1645        u32 gic_base_lo, gic_base_hi;
1646        u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1647
1648        mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1649        mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1650        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1651        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1652
1653        gic_base_lo =
1654                lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1655        gic_base_hi =
1656                upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1657
1658        WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1659        WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1660        WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1661        WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1662
1663        WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1664
1665        WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1666        WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1667
1668        WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1669                        GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1670
1671        WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1672
1673        WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1674
1675        WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1676}
1677
1678void goya_init_tpc_qmans(struct hl_device *hdev)
1679{
1680        struct goya_device *goya = hdev->asic_specific;
1681        u32 so_base_lo, so_base_hi;
1682        u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1683                        mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1684        int i;
1685
1686        if (goya->hw_cap_initialized & HW_CAP_TPC)
1687                return;
1688
1689        so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1690        so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1691
1692        for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1693                WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1694                                so_base_lo);
1695                WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1696                                so_base_hi);
1697        }
1698
1699        goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1700        goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1701        goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1702        goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1703        goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1704        goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1705        goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1706        goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1707
1708        for (i = 0 ; i < TPC_MAX_NUM ; i++)
1709                goya_init_tpc_cmdq(hdev, i);
1710
1711        goya->hw_cap_initialized |= HW_CAP_TPC;
1712}
1713
1714/*
1715 * goya_disable_internal_queues - Disable internal queues
1716 *
1717 * @hdev: pointer to hl_device structure
1718 *
1719 */
1720static void goya_disable_internal_queues(struct hl_device *hdev)
1721{
1722        WREG32(mmMME_QM_GLBL_CFG0, 0);
1723        WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1724
1725        WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1726        WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1727
1728        WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1729        WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1730
1731        WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1732        WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1733
1734        WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1735        WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1736
1737        WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1738        WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1739
1740        WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1741        WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1742
1743        WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1744        WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1745
1746        WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1747        WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1748}
1749
1750/*
1751 * goya_stop_internal_queues - Stop internal queues
1752 *
1753 * @hdev: pointer to hl_device structure
1754 *
1755 * Returns 0 on success
1756 *
1757 */
1758static int goya_stop_internal_queues(struct hl_device *hdev)
1759{
1760        int rc, retval = 0;
1761
1762        /*
1763         * Each queue (QMAN) is a separate H/W logic. That means that each
1764         * QMAN can be stopped independently and failure to stop one does NOT
1765         * mandate we should not try to stop other QMANs
1766         */
1767
1768        rc = goya_stop_queue(hdev,
1769                        mmMME_QM_GLBL_CFG1,
1770                        mmMME_QM_CP_STS,
1771                        mmMME_QM_GLBL_STS0);
1772
1773        if (rc) {
1774                dev_err(hdev->dev, "failed to stop MME QMAN\n");
1775                retval = -EIO;
1776        }
1777
1778        rc = goya_stop_queue(hdev,
1779                        mmMME_CMDQ_GLBL_CFG1,
1780                        mmMME_CMDQ_CP_STS,
1781                        mmMME_CMDQ_GLBL_STS0);
1782
1783        if (rc) {
1784                dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1785                retval = -EIO;
1786        }
1787
1788        rc = goya_stop_queue(hdev,
1789                        mmTPC0_QM_GLBL_CFG1,
1790                        mmTPC0_QM_CP_STS,
1791                        mmTPC0_QM_GLBL_STS0);
1792
1793        if (rc) {
1794                dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1795                retval = -EIO;
1796        }
1797
1798        rc = goya_stop_queue(hdev,
1799                        mmTPC0_CMDQ_GLBL_CFG1,
1800                        mmTPC0_CMDQ_CP_STS,
1801                        mmTPC0_CMDQ_GLBL_STS0);
1802
1803        if (rc) {
1804                dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
1805                retval = -EIO;
1806        }
1807
1808        rc = goya_stop_queue(hdev,
1809                        mmTPC1_QM_GLBL_CFG1,
1810                        mmTPC1_QM_CP_STS,
1811                        mmTPC1_QM_GLBL_STS0);
1812
1813        if (rc) {
1814                dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
1815                retval = -EIO;
1816        }
1817
1818        rc = goya_stop_queue(hdev,
1819                        mmTPC1_CMDQ_GLBL_CFG1,
1820                        mmTPC1_CMDQ_CP_STS,
1821                        mmTPC1_CMDQ_GLBL_STS0);
1822
1823        if (rc) {
1824                dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
1825                retval = -EIO;
1826        }
1827
1828        rc = goya_stop_queue(hdev,
1829                        mmTPC2_QM_GLBL_CFG1,
1830                        mmTPC2_QM_CP_STS,
1831                        mmTPC2_QM_GLBL_STS0);
1832
1833        if (rc) {
1834                dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
1835                retval = -EIO;
1836        }
1837
1838        rc = goya_stop_queue(hdev,
1839                        mmTPC2_CMDQ_GLBL_CFG1,
1840                        mmTPC2_CMDQ_CP_STS,
1841                        mmTPC2_CMDQ_GLBL_STS0);
1842
1843        if (rc) {
1844                dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
1845                retval = -EIO;
1846        }
1847
1848        rc = goya_stop_queue(hdev,
1849                        mmTPC3_QM_GLBL_CFG1,
1850                        mmTPC3_QM_CP_STS,
1851                        mmTPC3_QM_GLBL_STS0);
1852
1853        if (rc) {
1854                dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
1855                retval = -EIO;
1856        }
1857
1858        rc = goya_stop_queue(hdev,
1859                        mmTPC3_CMDQ_GLBL_CFG1,
1860                        mmTPC3_CMDQ_CP_STS,
1861                        mmTPC3_CMDQ_GLBL_STS0);
1862
1863        if (rc) {
1864                dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
1865                retval = -EIO;
1866        }
1867
1868        rc = goya_stop_queue(hdev,
1869                        mmTPC4_QM_GLBL_CFG1,
1870                        mmTPC4_QM_CP_STS,
1871                        mmTPC4_QM_GLBL_STS0);
1872
1873        if (rc) {
1874                dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
1875                retval = -EIO;
1876        }
1877
1878        rc = goya_stop_queue(hdev,
1879                        mmTPC4_CMDQ_GLBL_CFG1,
1880                        mmTPC4_CMDQ_CP_STS,
1881                        mmTPC4_CMDQ_GLBL_STS0);
1882
1883        if (rc) {
1884                dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
1885                retval = -EIO;
1886        }
1887
1888        rc = goya_stop_queue(hdev,
1889                        mmTPC5_QM_GLBL_CFG1,
1890                        mmTPC5_QM_CP_STS,
1891                        mmTPC5_QM_GLBL_STS0);
1892
1893        if (rc) {
1894                dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
1895                retval = -EIO;
1896        }
1897
1898        rc = goya_stop_queue(hdev,
1899                        mmTPC5_CMDQ_GLBL_CFG1,
1900                        mmTPC5_CMDQ_CP_STS,
1901                        mmTPC5_CMDQ_GLBL_STS0);
1902
1903        if (rc) {
1904                dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
1905                retval = -EIO;
1906        }
1907
1908        rc = goya_stop_queue(hdev,
1909                        mmTPC6_QM_GLBL_CFG1,
1910                        mmTPC6_QM_CP_STS,
1911                        mmTPC6_QM_GLBL_STS0);
1912
1913        if (rc) {
1914                dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
1915                retval = -EIO;
1916        }
1917
1918        rc = goya_stop_queue(hdev,
1919                        mmTPC6_CMDQ_GLBL_CFG1,
1920                        mmTPC6_CMDQ_CP_STS,
1921                        mmTPC6_CMDQ_GLBL_STS0);
1922
1923        if (rc) {
1924                dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
1925                retval = -EIO;
1926        }
1927
1928        rc = goya_stop_queue(hdev,
1929                        mmTPC7_QM_GLBL_CFG1,
1930                        mmTPC7_QM_CP_STS,
1931                        mmTPC7_QM_GLBL_STS0);
1932
1933        if (rc) {
1934                dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
1935                retval = -EIO;
1936        }
1937
1938        rc = goya_stop_queue(hdev,
1939                        mmTPC7_CMDQ_GLBL_CFG1,
1940                        mmTPC7_CMDQ_CP_STS,
1941                        mmTPC7_CMDQ_GLBL_STS0);
1942
1943        if (rc) {
1944                dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
1945                retval = -EIO;
1946        }
1947
1948        return retval;
1949}
1950
/*
 * goya_dma_stall - stall all five DMA QMAN engines
 *
 * @hdev: pointer to hl_device structure
 *
 * Sets the DMA_STOP bit in each engine's GLBL_CFG1 register. Each engine
 * has its own per-instance shift macro, hence the five explicit writes.
 */
static void goya_dma_stall(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
}
1959
/*
 * goya_tpc_stall - stall all eight TPC engines
 *
 * @hdev: pointer to hl_device structure
 *
 * Sets the stall valid bit in each TPC's CFG_TPC_STALL register. Each TPC
 * has its own per-instance shift macro, hence the eight explicit writes.
 */
static void goya_tpc_stall(struct hl_device *hdev)
{
	WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
}
1971
/*
 * goya_mme_stall - stall the MME engine
 *
 * @hdev: pointer to hl_device structure
 *
 * Writes all-ones to the MME stall register - presumably every stall bit
 * in the register; confirm against the MME register spec.
 */
static void goya_mme_stall(struct hl_device *hdev)
{
	WREG32(mmMME_STALL, 0xFFFFFFFF);
}
1976
1977static int goya_enable_msix(struct hl_device *hdev)
1978{
1979        struct goya_device *goya = hdev->asic_specific;
1980        int cq_cnt = hdev->asic_prop.completion_queues_count;
1981        int rc, i, irq_cnt_init, irq;
1982
1983        if (goya->hw_cap_initialized & HW_CAP_MSIX)
1984                return 0;
1985
1986        rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
1987                                GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
1988        if (rc < 0) {
1989                dev_err(hdev->dev,
1990                        "MSI-X: Failed to enable support -- %d/%d\n",
1991                        GOYA_MSIX_ENTRIES, rc);
1992                return rc;
1993        }
1994
1995        for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
1996                irq = pci_irq_vector(hdev->pdev, i);
1997                rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
1998                                &hdev->completion_queue[i]);
1999                if (rc) {
2000                        dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2001                        goto free_irqs;
2002                }
2003        }
2004
2005        irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2006
2007        rc = request_irq(irq, hl_irq_handler_eq, 0,
2008                        goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2009                        &hdev->event_queue);
2010        if (rc) {
2011                dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2012                goto free_irqs;
2013        }
2014
2015        goya->hw_cap_initialized |= HW_CAP_MSIX;
2016        return 0;
2017
2018free_irqs:
2019        for (i = 0 ; i < irq_cnt_init ; i++)
2020                free_irq(pci_irq_vector(hdev->pdev, i),
2021                        &hdev->completion_queue[i]);
2022
2023        pci_free_irq_vectors(hdev->pdev);
2024        return rc;
2025}
2026
2027static void goya_sync_irqs(struct hl_device *hdev)
2028{
2029        struct goya_device *goya = hdev->asic_specific;
2030        int i;
2031
2032        if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2033                return;
2034
2035        /* Wait for all pending IRQs to be finished */
2036        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2037                synchronize_irq(pci_irq_vector(hdev->pdev, i));
2038
2039        synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2040}
2041
2042static void goya_disable_msix(struct hl_device *hdev)
2043{
2044        struct goya_device *goya = hdev->asic_specific;
2045        int i, irq;
2046
2047        if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2048                return;
2049
2050        goya_sync_irqs(hdev);
2051
2052        irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2053        free_irq(irq, &hdev->event_queue);
2054
2055        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2056                irq = pci_irq_vector(hdev->pdev, i);
2057                free_irq(irq, &hdev->completion_queue[i]);
2058        }
2059
2060        pci_free_irq_vectors(hdev->pdev);
2061
2062        goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2063}
2064
/*
 * goya_enable_timestamp - reset and start the PSOC timestamp counter
 *
 * @hdev: pointer to hl_device structure
 *
 * The control register sits at offset 0 of the timestamp block; the two
 * writes at offsets 0xC and 0x8 clear the two halves of the 64-bit count -
 * presumably the high/low value registers; confirm against the PSOC
 * timestamp block spec.
 */
static void goya_enable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);

	/* Zero the lower/upper parts of the 64-bit counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);

	/* Enable the counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
}
2077
/*
 * goya_disable_timestamp - stop the PSOC timestamp counter
 *
 * @hdev: pointer to hl_device structure
 */
static void goya_disable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}
2083
/*
 * goya_halt_engines - stop all compute engines and quiesce interrupts
 *
 * @hdev: pointer to hl_device structure
 * @hard_reset: when true, also forces the embedded CPU into WFE, tears
 *              down MSI-X and removes the CPU's MMU mappings
 *
 * Ordering is deliberate: stop the queues, wait, stall the engines, wait
 * again, and only then disable the queues.
 */
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
	u32 wait_timeout_ms, cpu_timeout_ms;

	dev_info(hdev->dev,
		"Halting compute engines and disabling interrupts\n");

	/* Palladium (pldm) platforms are much slower - use longer waits */
	if (hdev->pldm) {
		wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
		cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
	} else {
		wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
		cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
	}

	if (hard_reset) {
		/*
		 * I don't know what is the state of the CPU so make sure it is
		 * stopped in any means necessary
		 */
		WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
		msleep(cpu_timeout_ms);
	}

	goya_stop_external_queues(hdev);
	goya_stop_internal_queues(hdev);

	/* Let the queue-stop settle before stalling the engines */
	msleep(wait_timeout_ms);

	goya_dma_stall(hdev);
	goya_tpc_stall(hdev);
	goya_mme_stall(hdev);

	/* Give the stalls time to take effect before disabling the queues */
	msleep(wait_timeout_ms);

	goya_disable_external_queues(hdev);
	goya_disable_internal_queues(hdev);

	goya_disable_timestamp(hdev);

	if (hard_reset) {
		goya_disable_msix(hdev);
		goya_mmu_remove_device_cpu_mappings(hdev);
	} else {
		/* Soft reset keeps MSI-X; just drain in-flight handlers */
		goya_sync_irqs(hdev);
	}
}
2133
2134/*
2135 * goya_push_uboot_to_device() - Push u-boot FW code to device.
2136 * @hdev: Pointer to hl_device structure.
2137 *
2138 * Copy u-boot fw code from firmware file to SRAM BAR.
2139 *
2140 * Return: 0 on success, non-zero for failure.
2141 */
2142static int goya_push_uboot_to_device(struct hl_device *hdev)
2143{
2144        char fw_name[200];
2145        void __iomem *dst;
2146
2147        snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2148        dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2149
2150        return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2151}
2152
2153/*
2154 * goya_push_linux_to_device() - Push LINUX FW code to device.
2155 * @hdev: Pointer to hl_device structure.
2156 *
2157 * Copy LINUX fw code from firmware file to HBM BAR.
2158 *
2159 * Return: 0 on success, non-zero for failure.
2160 */
2161static int goya_push_linux_to_device(struct hl_device *hdev)
2162{
2163        char fw_name[200];
2164        void __iomem *dst;
2165
2166        snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2167        dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2168
2169        return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2170}
2171
/*
 * goya_pldm_init_cpu - boot the embedded CPU on a Palladium (pldm) platform
 *
 * @hdev: pointer to hl_device structure
 *
 * On pldm the driver itself resets the CA53 cores, pushes u-boot to SRAM
 * and the Linux FIT image to DRAM, points core 0's reset vector at u-boot
 * and releases it from reset. Register write order is critical here.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int goya_pldm_init_cpu(struct hl_device *hdev)
{
	u32 val, unit_rst_val;
	int rc;

	/* Must initialize SRAM scrambler before pushing u-boot to SRAM */
	goya_init_golden_registers(hdev);

	/* Put ARM cores into reset */
	WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
	/* Read back - presumably to post/flush the write; confirm */
	val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);

	/* Reset the CA53 MACRO */
	unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
	WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
	val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
	/* Restore the original reset-line state saved above */
	WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
	val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);

	rc = goya_push_uboot_to_device(hdev);
	if (rc)
		return rc;

	rc = goya_push_linux_to_device(hdev);
	if (rc)
		return rc;

	/* Tell u-boot the FIT image is already in place */
	WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
	WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);

	/* Point core 0's reset vector at u-boot in SRAM */
	WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
		lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
	WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
		upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));

	/* Release ARM core 0 from reset */
	WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
					CPU_RESET_CORE0_DEASSERT);
	val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);

	return 0;
}
2214
2215/*
2216 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2217 * The version string should be located by that offset.
2218 */
2219static void goya_read_device_fw_version(struct hl_device *hdev,
2220                                        enum goya_fw_component fwc)
2221{
2222        const char *name;
2223        u32 ver_off;
2224        char *dest;
2225
2226        switch (fwc) {
2227        case FW_COMP_UBOOT:
2228                ver_off = RREG32(mmUBOOT_VER_OFFSET);
2229                dest = hdev->asic_prop.uboot_ver;
2230                name = "U-Boot";
2231                break;
2232        case FW_COMP_PREBOOT:
2233                ver_off = RREG32(mmPREBOOT_VER_OFFSET);
2234                dest = hdev->asic_prop.preboot_ver;
2235                name = "Preboot";
2236                break;
2237        default:
2238                dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2239                return;
2240        }
2241
2242        ver_off &= ~((u32)SRAM_BASE_ADDR);
2243
2244        if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2245                memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2246                                                        VERSION_MAX_LEN);
2247        } else {
2248                dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2249                                                                name, ver_off);
2250                strcpy(dest, "unavailable");
2251        }
2252}
2253
2254static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2255{
2256        struct goya_device *goya = hdev->asic_specific;
2257        u32 status;
2258        int rc;
2259
2260        if (!hdev->cpu_enable)
2261                return 0;
2262
2263        if (goya->hw_cap_initialized & HW_CAP_CPU)
2264                return 0;
2265
2266        /*
2267         * Before pushing u-boot/linux to device, need to set the ddr bar to
2268         * base address of dram
2269         */
2270        if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2271                dev_err(hdev->dev,
2272                        "failed to map DDR bar to DRAM base address\n");
2273                return -EIO;
2274        }
2275
2276        if (hdev->pldm) {
2277                rc = goya_pldm_init_cpu(hdev);
2278                if (rc)
2279                        return rc;
2280
2281                goto out;
2282        }
2283
2284        /* Make sure CPU boot-loader is running */
2285        rc = hl_poll_timeout(
2286                hdev,
2287                mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2288                status,
2289                (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2290                (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2291                10000,
2292                cpu_timeout);
2293
2294        if (rc) {
2295                dev_err(hdev->dev, "Error in ARM u-boot!");
2296                switch (status) {
2297                case CPU_BOOT_STATUS_NA:
2298                        dev_err(hdev->dev,
2299                                "ARM status %d - BTL did NOT run\n", status);
2300                        break;
2301                case CPU_BOOT_STATUS_IN_WFE:
2302                        dev_err(hdev->dev,
2303                                "ARM status %d - Inside WFE loop\n", status);
2304                        break;
2305                case CPU_BOOT_STATUS_IN_BTL:
2306                        dev_err(hdev->dev,
2307                                "ARM status %d - Stuck in BTL\n", status);
2308                        break;
2309                case CPU_BOOT_STATUS_IN_PREBOOT:
2310                        dev_err(hdev->dev,
2311                                "ARM status %d - Stuck in Preboot\n", status);
2312                        break;
2313                case CPU_BOOT_STATUS_IN_SPL:
2314                        dev_err(hdev->dev,
2315                                "ARM status %d - Stuck in SPL\n", status);
2316                        break;
2317                case CPU_BOOT_STATUS_IN_UBOOT:
2318                        dev_err(hdev->dev,
2319                                "ARM status %d - Stuck in u-boot\n", status);
2320                        break;
2321                case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2322                        dev_err(hdev->dev,
2323                                "ARM status %d - DDR initialization failed\n",
2324                                status);
2325                        break;
2326                case CPU_BOOT_STATUS_UBOOT_NOT_READY:
2327                        dev_err(hdev->dev,
2328                                "ARM status %d - u-boot stopped by user\n",
2329                                status);
2330                        break;
2331                default:
2332                        dev_err(hdev->dev,
2333                                "ARM status %d - Invalid status code\n",
2334                                status);
2335                        break;
2336                }
2337                return -EIO;
2338        }
2339
2340        /* Read U-Boot version now in case we will later fail */
2341        goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2342        goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2343
2344        if (!hdev->fw_loading) {
2345                dev_info(hdev->dev, "Skip loading FW\n");
2346                goto out;
2347        }
2348
2349        if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2350                goto out;
2351
2352        rc = goya_push_linux_to_device(hdev);
2353        if (rc)
2354                return rc;
2355
2356        WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2357
2358        rc = hl_poll_timeout(
2359                hdev,
2360                mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2361                status,
2362                (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2363                10000,
2364                cpu_timeout);
2365
2366        if (rc) {
2367                if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2368                        dev_err(hdev->dev,
2369                                "ARM u-boot reports FIT image is corrupted\n");
2370                else
2371                        dev_err(hdev->dev,
2372                                "ARM Linux failed to load, %d\n", status);
2373                WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2374                return -EIO;
2375        }
2376
2377        dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2378
2379out:
2380        goya->hw_cap_initialized |= HW_CAP_CPU;
2381
2382        return 0;
2383}
2384
/*
 * goya_mmu_update_asid_hop0_addr - program the hop-0 page-table address
 *                                  of a single ASID
 *
 * @hdev: pointer to hl_device structure
 * @asid: address-space ID to configure
 * @phys_addr: physical address of the ASID's hop-0 page table
 *
 * Return: 0 on success, non-zero if the MMU did not acknowledge the
 *         update within the timeout.
 */
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
						u64 phys_addr)
{
	u32 status, timeout_usec;
	int rc;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	/* Split the physical address across the two HOP0 registers */
	WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
	WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
	/* Bit 31 kicks off the update for the given ASID */
	WREG32(MMU_ASID_BUSY, 0x80000000 | asid);

	/* Poll until the H/W clears the busy bit (bit 31) */
	rc = hl_poll_timeout(
		hdev,
		MMU_ASID_BUSY,
		status,
		!(status & 0x80000000),
		1000,
		timeout_usec);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout during MMU hop0 config of asid %d\n", asid);
		return rc;
	}

	return 0;
}
2416
2417int goya_mmu_init(struct hl_device *hdev)
2418{
2419        struct asic_fixed_properties *prop = &hdev->asic_prop;
2420        struct goya_device *goya = hdev->asic_specific;
2421        u64 hop0_addr;
2422        int rc, i;
2423
2424        if (!hdev->mmu_enable)
2425                return 0;
2426
2427        if (goya->hw_cap_initialized & HW_CAP_MMU)
2428                return 0;
2429
2430        hdev->dram_supports_virtual_memory = true;
2431        hdev->dram_default_page_mapping = true;
2432
2433        for (i = 0 ; i < prop->max_asid ; i++) {
2434                hop0_addr = prop->mmu_pgt_addr +
2435                                (i * prop->mmu_hop_table_size);
2436
2437                rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2438                if (rc) {
2439                        dev_err(hdev->dev,
2440                                "failed to set hop0 addr for asid %d\n", i);
2441                        goto err;
2442                }
2443        }
2444
2445        goya->hw_cap_initialized |= HW_CAP_MMU;
2446
2447        /* init MMU cache manage page */
2448        WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2449                                lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2450        WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2451
2452        /* Remove follower feature due to performance bug */
2453        WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2454                        (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2455
2456        hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2457
2458        WREG32(mmMMU_MMU_ENABLE, 1);
2459        WREG32(mmMMU_SPI_MASK, 0xF);
2460
2461        return 0;
2462
2463err:
2464        return rc;
2465}
2466
2467/*
2468 * goya_hw_init - Goya hardware initialization code
2469 *
2470 * @hdev: pointer to hl_device structure
2471 *
2472 * Returns 0 on success
2473 *
2474 */
static int goya_hw_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 val;
	int rc;

	dev_info(hdev->dev, "Starting initialization of H/W\n");

	/* Perform read from the device to make sure device is up */
	val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	/*
	 * Let's mark in the H/W that we have reached this point. We check
	 * this value in the reset_before_init function to understand whether
	 * we need to reset the chip before doing H/W init. This register is
	 * cleared by the H/W upon H/W reset
	 */
	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);

	rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU\n");
		return rc;
	}

	goya_tpc_mbist_workaround(hdev);

	goya_init_golden_registers(hdev);

	/*
	 * After CPU initialization is finished, change DDR bar mapping inside
	 * iATU to point to the start address of the MMU page tables
	 */
	if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
			(MMU_PAGE_TABLES_ADDR &
			~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
		dev_err(hdev->dev,
			"failed to map DDR bar to MMU page tables\n");
		return -EIO;
	}

	rc = goya_mmu_init(hdev);
	if (rc)
		return rc;

	/* Security must come up after the MMU, before the engine queues */
	goya_init_security(hdev);

	goya_init_dma_qmans(hdev);

	goya_init_mme_qmans(hdev);

	goya_init_tpc_qmans(hdev);

	goya_enable_timestamp(hdev);

	/* MSI-X must be enabled before CPU queues are initialized */
	rc = goya_enable_msix(hdev);
	if (rc)
		goto disable_queues;

	/* Perform read from the device to flush all MSI-X configuration */
	val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	/* NOTE: 'val' is deliberately unused - the reads above exist only to
	 * force PCI transactions to complete
	 */
	return 0;

disable_queues:
	goya_disable_internal_queues(hdev);
	goya_disable_external_queues(hdev);

	return rc;
}
2546
2547/*
2548 * goya_hw_fini - Goya hardware tear-down code
2549 *
2550 * @hdev: pointer to hl_device structure
2551 * @hard_reset: should we do hard reset to all engines or just reset the
2552 *              compute/dma engines
2553 */
static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 reset_timeout_ms, status;

	/* Palladium emulation needs a much longer reset timeout */
	if (hdev->pldm)
		reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
	else
		reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;

	if (hard_reset) {
		/* Restore the default DDR bar mapping before resetting */
		goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
		goya_disable_clk_rlx(hdev);
		goya_set_pll_refclk(hdev);

		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
		dev_info(hdev->dev,
			"Issued HARD reset command, going to wait %dms\n",
			reset_timeout_ms);
	} else {
		/* Soft reset touches only the compute/DMA engines */
		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
		dev_info(hdev->dev,
			"Issued SOFT reset command, going to wait %dms\n",
			reset_timeout_ms);
	}

	/*
	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
	 * itself is in reset. In either reset we need to wait until the reset
	 * is deasserted
	 */
	msleep(reset_timeout_ms);

	status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
	if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
		dev_err(hdev->dev,
			"Timeout while waiting for device to reset 0x%x\n",
			status);

	if (!hard_reset) {
		/* Only the engine caps were lost; notify F/W of the soft reset */
		goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
						HW_CAP_GOLDEN | HW_CAP_TPC);
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
				GOYA_ASYNC_EVENT_ID_SOFT_RESET);
		return;
	}

	/* Chicken bit to re-initiate boot sequencer flow */
	WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
		1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
	/* Move boot manager FSM to pre boot sequencer init state */
	WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
			0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);

	/* Hard reset wipes everything, including the CPU and MMU state */
	goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
					HW_CAP_DDR_0 | HW_CAP_DDR_1 |
					HW_CAP_DMA | HW_CAP_MME |
					HW_CAP_MMU | HW_CAP_TPC_MBIST |
					HW_CAP_GOLDEN | HW_CAP_TPC);
	memset(goya->events_stat, 0, sizeof(goya->events_stat));

	if (!hdev->pldm) {
		int rc;
		/* In case we are running inside VM and the VM is
		 * shutting down, we need to make sure CPU boot-loader
		 * is running before we can continue the VM shutdown.
		 * That is because the VM will send an FLR signal that
		 * we must answer
		 */
		dev_info(hdev->dev,
			"Going to wait up to %ds for CPU boot loader\n",
			GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);

		rc = hl_poll_timeout(
			hdev,
			mmPSOC_GLOBAL_CONF_WARM_REBOOT,
			status,
			(status == CPU_BOOT_STATUS_DRAM_RDY),
			10000,
			GOYA_CPU_TIMEOUT_USEC);
		if (rc)
			dev_err(hdev->dev,
				"failed to wait for CPU boot loader\n");
	}
}
2639
2640int goya_suspend(struct hl_device *hdev)
2641{
2642        int rc;
2643
2644        rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2645        if (rc)
2646                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2647
2648        return rc;
2649}
2650
/*
 * goya_resume - bring the device back after suspend
 *
 * @hdev: pointer to hl_device structure
 *
 * Re-initializes the PCI iATU regions (presumably lost across the
 * suspend/resume cycle - confirm against goya_init_iatu()).
 *
 * Return: 0 on success, negative errno otherwise.
 */
int goya_resume(struct hl_device *hdev)
{
	return goya_init_iatu(hdev);
}
2655
2656static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2657                u64 kaddress, phys_addr_t paddress, u32 size)
2658{
2659        int rc;
2660
2661        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2662                        VM_DONTCOPY | VM_NORESERVE;
2663
2664        rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2665                                size, vma->vm_page_prot);
2666        if (rc)
2667                dev_err(hdev->dev, "remap_pfn_range error %d", rc);
2668
2669        return rc;
2670}
2671
/*
 * goya_ring_doorbell - ring the doorbell of a H/W queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue whose producer index is being updated
 * @pi: new producer index value
 *
 * Maps the queue ID to its QMAN PQ_PI doorbell register and writes the new
 * PI there. For the CPU queue, additionally raises a GIC interrupt so the
 * embedded CPU notices the PI update.
 */
void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
	u32 db_reg_offset, db_value;

	switch (hw_queue_id) {
	case GOYA_QUEUE_ID_DMA_0:
		db_reg_offset = mmDMA_QM_0_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_1:
		db_reg_offset = mmDMA_QM_1_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_2:
		db_reg_offset = mmDMA_QM_2_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_3:
		db_reg_offset = mmDMA_QM_3_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_4:
		db_reg_offset = mmDMA_QM_4_PQ_PI;
		break;

	case GOYA_QUEUE_ID_CPU_PQ:
		db_reg_offset = mmCPU_IF_PF_PQ_PI;
		break;

	case GOYA_QUEUE_ID_MME:
		db_reg_offset = mmMME_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC0:
		db_reg_offset = mmTPC0_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC1:
		db_reg_offset = mmTPC1_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC2:
		db_reg_offset = mmTPC2_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC3:
		db_reg_offset = mmTPC3_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC4:
		db_reg_offset = mmTPC4_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC5:
		db_reg_offset = mmTPC5_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC6:
		db_reg_offset = mmTPC6_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC7:
		db_reg_offset = mmTPC7_QM_PQ_PI;
		break;

	default:
		/* Should never get here */
		dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
			hw_queue_id);
		return;
	}

	db_value = pi;

	/* ring the doorbell */
	WREG32(db_reg_offset, db_value);

	if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
				GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
2753
/*
 * goya_pqe_write - copy a bus descriptor into a PQ entry
 *
 * @hdev: pointer to hl_device structure
 * @pqe: destination PQ entry (resides in device SRAM, hence I/O space)
 * @bd: bus descriptor to copy
 */
void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
{
	/* The QMANs are on the SRAM so need to copy to IO space */
	memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
}
2759
2760static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2761                                        dma_addr_t *dma_handle, gfp_t flags)
2762{
2763        void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2764                                                dma_handle, flags);
2765
2766        /* Shift to the device's base physical address of host memory */
2767        if (kernel_addr)
2768                *dma_handle += HOST_PHYS_BASE;
2769
2770        return kernel_addr;
2771}
2772
2773static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2774                                        void *cpu_addr, dma_addr_t dma_handle)
2775{
2776        /* Cancel the device's base physical address of host memory */
2777        dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2778
2779        dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2780}
2781
/*
 * goya_get_int_queue_base - get the base address of an internal H/W queue
 *
 * @hdev: pointer to hl_device structure
 * @queue_id: ID of the internal (MME/TPC) queue
 * @dma_handle: filled with the SRAM address of the queue
 * @queue_len: filled with the queue length
 *
 * Internal queues live on the device SRAM; this translates the queue ID to
 * its fixed SRAM offset and returns both the CPU-mapped (BAR) address and
 * the device SRAM address.
 *
 * Return: CPU-mapped base address of the queue, or NULL for an invalid ID.
 */
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
				dma_addr_t *dma_handle, u16 *queue_len)
{
	void *base;
	u32 offset;

	*dma_handle = hdev->asic_prop.sram_base_address;

	base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];

	switch (queue_id) {
	case GOYA_QUEUE_ID_MME:
		offset = MME_QMAN_BASE_OFFSET;
		*queue_len = MME_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC0:
		offset = TPC0_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC1:
		offset = TPC1_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC2:
		offset = TPC2_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC3:
		offset = TPC3_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC4:
		offset = TPC4_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC5:
		offset = TPC5_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC6:
		offset = TPC6_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC7:
		offset = TPC7_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	default:
		dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
		return NULL;
	}

	/* Apply the same offset to both the CPU and the device view */
	base += offset;
	*dma_handle += offset;

	return base;
}
2839
/*
 * goya_send_job_on_qman0 - execute a driver-internal job on DMA QMAN 0
 *
 * @hdev: pointer to hl_device structure
 * @job: the CS job whose patched CB should be executed
 *
 * Appends a MSG_PROT fence packet at the tail of the job's patched CB,
 * submits the CB on DMA queue 0 without a completion, and busy-polls the
 * fence memory until the packet writes the expected value. QMAN0 security
 * is raised for the duration of the job.
 *
 * Return: 0 on success, -EBUSY if the device is not idle, -ENOMEM on fence
 * allocation failure, or the submit/poll error code.
 */
static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct packet_msg_prot *fence_pkt;
	u32 *fence_ptr;
	dma_addr_t fence_dma_addr;
	struct hl_cb *cb;
	u32 tmp, timeout;
	int rc;

	if (hdev->pldm)
		timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
	else
		timeout = HL_DEVICE_TIMEOUT_USEC;

	if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
		dev_err_ratelimited(hdev->dev,
			"Can't send driver job on QMAN0 because the device is not idle\n");
		return -EBUSY;
	}

	fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate fence memory for QMAN0\n");
		return -ENOMEM;
	}

	goya_qman0_set_security(hdev, true);

	cb = job->patched_cb;

	/* The fence packet occupies the last slot of the patched CB */
	fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
			job->job_cb_size - sizeof(struct packet_msg_prot));

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
					job->job_cb_size, cb->bus_address);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
		goto free_fence_ptr;
	}

	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
				(tmp == GOYA_QMAN0_FENCE_VAL), 1000,
				timeout, true);

	hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
		goto free_fence_ptr;
	}

	/* Success and failure paths share the same cleanup */
free_fence_ptr:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);

	goya_qman0_set_security(hdev, false);

	return rc;
}
2908
2909int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
2910                                u32 timeout, long *result)
2911{
2912        struct goya_device *goya = hdev->asic_specific;
2913
2914        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
2915                if (result)
2916                        *result = 0;
2917                return 0;
2918        }
2919
2920        return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
2921                                        timeout, result);
2922}
2923
/*
 * goya_test_queue - sanity-test a single external H/W queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to test
 *
 * Allocates a fence dword and a MSG_PROT packet from the DMA pool, submits
 * the packet on the queue and polls the fence memory for the expected
 * value, proving the queue can fetch and execute a packet.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO if the fence
 * value never arrives, or the submit error code.
 */
int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
	struct packet_msg_prot *fence_pkt;
	dma_addr_t pkt_dma_addr;
	u32 fence_val, tmp;
	dma_addr_t fence_dma_addr;
	u32 *fence_ptr;
	int rc;

	fence_val = GOYA_QMAN0_FENCE_VAL;

	fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate memory for queue testing\n");
		return -ENOMEM;
	}

	/* Clear the fence so stale data can't fake a pass */
	*fence_ptr = 0;

	fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
					sizeof(struct packet_msg_prot),
					GFP_KERNEL, &pkt_dma_addr);
	if (!fence_pkt) {
		dev_err(hdev->dev,
			"Failed to allocate packet for queue testing\n");
		rc = -ENOMEM;
		goto free_fence_ptr;
	}

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(fence_val);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
					sizeof(struct packet_msg_prot),
					pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to send fence packet\n");
		goto free_pkt;
	}

	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
					1000, GOYA_TEST_QUEUE_WAIT_USEC, true);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev,
			"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
			hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
		rc = -EIO;
	} else {
		dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
			hw_queue_id);
	}

free_pkt:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
					pkt_dma_addr);
free_fence_ptr:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);
	return rc;
}
2994
2995int goya_test_cpu_queue(struct hl_device *hdev)
2996{
2997        struct goya_device *goya = hdev->asic_specific;
2998
2999        /*
3000         * check capability here as send_cpu_message() won't update the result
3001         * value if no capability
3002         */
3003        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3004                return 0;
3005
3006        return hl_fw_test_cpu_queue(hdev);
3007}
3008
3009int goya_test_queues(struct hl_device *hdev)
3010{
3011        int i, rc, ret_val = 0;
3012
3013        for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3014                rc = goya_test_queue(hdev, i);
3015                if (rc)
3016                        ret_val = -EINVAL;
3017        }
3018
3019        return ret_val;
3020}
3021
3022static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3023                                        gfp_t mem_flags, dma_addr_t *dma_handle)
3024{
3025        void *kernel_addr;
3026
3027        if (size > GOYA_DMA_POOL_BLK_SIZE)
3028                return NULL;
3029
3030        kernel_addr =  dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3031
3032        /* Shift to the device's base physical address of host memory */
3033        if (kernel_addr)
3034                *dma_handle += HOST_PHYS_BASE;
3035
3036        return kernel_addr;
3037}
3038
3039static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3040                                dma_addr_t dma_addr)
3041{
3042        /* Cancel the device's base physical address of host memory */
3043        dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3044
3045        dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3046}
3047
3048void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3049                                        dma_addr_t *dma_handle)
3050{
3051        void *vaddr;
3052
3053        vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3054        *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3055                        VA_CPU_ACCESSIBLE_MEM_ADDR;
3056
3057        return vaddr;
3058}
3059
/*
 * goya_cpu_accessible_dma_pool_free - free a CPU-accessible pool allocation
 *
 * @hdev: pointer to hl_device structure
 * @size: size of the original allocation
 * @vaddr: kernel virtual address returned by the alloc counterpart
 */
void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
					void *vaddr)
{
	hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}
3065
3066static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
3067                                int nents, enum dma_data_direction dir)
3068{
3069        struct scatterlist *sg;
3070        int i;
3071
3072        if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
3073                return -ENOMEM;
3074
3075        /* Shift to the device's base physical address of host memory */
3076        for_each_sg(sgl, sg, nents, i)
3077                sg->dma_address += HOST_PHYS_BASE;
3078
3079        return 0;
3080}
3081
3082static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
3083                                int nents, enum dma_data_direction dir)
3084{
3085        struct scatterlist *sg;
3086        int i;
3087
3088        /* Cancel the device's base physical address of host memory */
3089        for_each_sg(sgl, sg, nents, i)
3090                sg->dma_address -= HOST_PHYS_BASE;
3091
3092        dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
3093}
3094
/*
 * goya_get_dma_desc_list_size - compute the patched-CB size for an sg table
 *
 * @hdev: pointer to hl_device structure
 * @sgt: DMA-mapped scatter-gather table
 *
 * Counts how many LIN_DMA packets are needed to cover @sgt, merging
 * physically-contiguous consecutive entries as long as the combined length
 * stays within DMA_MAX_TRANSFER_SIZE (mirroring what the CB patching code
 * will do), and returns the total packet bytes.
 *
 * Return: number of descriptors times sizeof(struct packet_lin_dma).
 */
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t addr, addr_next;

	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {

		len = sg_dma_len(sg);
		addr = sg_dma_address(sg);

		/* A zero-length entry marks the effective end of the list */
		if (len == 0)
			break;

		/* Greedily absorb following entries that are contiguous */
		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((addr + len == addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				/* Advancing sg/count here also skips the
				 * merged entry in the outer for_each_sg
				 */
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		dma_desc_cnt++;
	}

	return dma_desc_cnt * sizeof(struct packet_lin_dma);
}
3135
/*
 * goya_pin_memory_before_cs - pin the host memory behind a user DMA packet
 *
 * @hdev: pointer to hl_device structure
 * @parser: CS parser context; its userptr list and patched CB size are
 *          updated
 * @user_dma_pkt: the user's LIN_DMA packet referencing host memory
 * @addr: host virtual address referenced by the packet
 * @dir: DMA data direction for the mapping
 *
 * Pins and DMA-maps the user range unless an overlapping range is already
 * pinned for this job, then grows the parser's patched CB size by the
 * number of descriptors the range will need.
 *
 * Return: 0 on success, -ENOMEM or the pin/map error code on failure.
 */
static int goya_pin_memory_before_cs(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				u64 addr, enum dma_data_direction dir)
{
	struct hl_userptr *userptr;
	int rc;

	/* Reuse an existing pin if this range was already pinned for the job */
	if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr))
		goto already_pinned;

	/* GFP_ATOMIC - presumably called from a context that must not sleep
	 * on allocation; confirm against the CS parser call path
	 */
	userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
				userptr);
	if (rc)
		goto free_userptr;

	list_add_tail(&userptr->job_node, parser->job_userptr_list);

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, dir);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto unpin_memory;
	}

	userptr->dma_mapped = true;
	userptr->dir = dir;

already_pinned:
	parser->patched_cb_size +=
			goya_get_dma_desc_list_size(hdev, userptr->sgt);

	return 0;

unpin_memory:
	hl_unpin_host_memory(hdev, userptr);
free_userptr:
	kfree(userptr);
	return rc;
}
3181
3182static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3183                                struct hl_cs_parser *parser,
3184                                struct packet_lin_dma *user_dma_pkt)
3185{
3186        u64 device_memory_addr, addr;
3187        enum dma_data_direction dir;
3188        enum goya_dma_direction user_dir;
3189        bool sram_addr = true;
3190        bool skip_host_mem_pin = false;
3191        bool user_memset;
3192        u32 ctl;
3193        int rc = 0;
3194
3195        ctl = le32_to_cpu(user_dma_pkt->ctl);
3196
3197        user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3198                        GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3199
3200        user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3201                        GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3202
3203        switch (user_dir) {
3204        case DMA_HOST_TO_DRAM:
3205                dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3206                dir = DMA_TO_DEVICE;
3207                sram_addr = false;
3208                addr = le64_to_cpu(user_dma_pkt->src_addr);
3209                device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3210                if (user_memset)
3211                        skip_host_mem_pin = true;
3212                break;
3213
3214        case DMA_DRAM_TO_HOST:
3215                dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3216                dir = DMA_FROM_DEVICE;
3217                sram_addr = false;
3218                addr = le64_to_cpu(user_dma_pkt->dst_addr);
3219                device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3220                break;
3221
3222        case DMA_HOST_TO_SRAM:
3223                dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3224                dir = DMA_TO_DEVICE;
3225                addr = le64_to_cpu(user_dma_pkt->src_addr);
3226                device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3227                if (user_memset)
3228                        skip_host_mem_pin = true;
3229                break;
3230
3231        case DMA_SRAM_TO_HOST:
3232                dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3233                dir = DMA_FROM_DEVICE;
3234                addr = le64_to_cpu(user_dma_pkt->dst_addr);
3235                device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3236                break;
3237        default:
3238                dev_err(hdev->dev, "DMA direction is undefined\n");
3239                return -EFAULT;
3240        }
3241
3242        if (sram_addr) {
3243                if (!hl_mem_area_inside_range(device_memory_addr,
3244                                le32_to_cpu(user_dma_pkt->tsize),
3245                                hdev->asic_prop.sram_user_base_address,
3246                                hdev->asic_prop.sram_end_address)) {
3247
3248                        dev_err(hdev->dev,
3249                                "SRAM address 0x%llx + 0x%x is invalid\n",
3250                                device_memory_addr,
3251                                user_dma_pkt->tsize);
3252                        return -EFAULT;
3253                }
3254        } else {
3255                if (!hl_mem_area_inside_range(device_memory_addr,
3256                                le32_to_cpu(user_dma_pkt->tsize),
3257                                hdev->asic_prop.dram_user_base_address,
3258                                hdev->asic_prop.dram_end_address)) {
3259
3260                        dev_err(hdev->dev,
3261                                "DRAM address 0x%llx + 0x%x is invalid\n",
3262                                device_memory_addr,
3263                                user_dma_pkt->tsize);
3264                        return -EFAULT;
3265                }
3266        }
3267
3268        if (skip_host_mem_pin)
3269                parser->patched_cb_size += sizeof(*user_dma_pkt);
3270        else {
3271                if ((dir == DMA_TO_DEVICE) &&
3272                                (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3273                        dev_err(hdev->dev,
3274                                "Can't DMA from host on queue other then 1\n");
3275                        return -EFAULT;
3276                }
3277
3278                rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3279                                                addr, dir);
3280        }
3281
3282        return rc;
3283}
3284
3285static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3286                                struct hl_cs_parser *parser,
3287                                struct packet_lin_dma *user_dma_pkt)
3288{
3289        u64 sram_memory_addr, dram_memory_addr;
3290        enum goya_dma_direction user_dir;
3291        u32 ctl;
3292
3293        ctl = le32_to_cpu(user_dma_pkt->ctl);
3294        user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3295                        GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3296
3297        if (user_dir == DMA_DRAM_TO_SRAM) {
3298                dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3299                dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3300                sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3301        } else {
3302                dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3303                sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3304                dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3305        }
3306
3307        if (!hl_mem_area_inside_range(sram_memory_addr,
3308                                le32_to_cpu(user_dma_pkt->tsize),
3309                                hdev->asic_prop.sram_user_base_address,
3310                                hdev->asic_prop.sram_end_address)) {
3311                dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3312                        sram_memory_addr, user_dma_pkt->tsize);
3313                return -EFAULT;
3314        }
3315
3316        if (!hl_mem_area_inside_range(dram_memory_addr,
3317                                le32_to_cpu(user_dma_pkt->tsize),
3318                                hdev->asic_prop.dram_user_base_address,
3319                                hdev->asic_prop.dram_end_address)) {
3320                dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3321                        dram_memory_addr, user_dma_pkt->tsize);
3322                return -EFAULT;
3323        }
3324
3325        parser->patched_cb_size += sizeof(*user_dma_pkt);
3326
3327        return 0;
3328}
3329
3330static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3331                                struct hl_cs_parser *parser,
3332                                struct packet_lin_dma *user_dma_pkt)
3333{
3334        enum goya_dma_direction user_dir;
3335        u32 ctl;
3336        int rc;
3337
3338        dev_dbg(hdev->dev, "DMA packet details:\n");
3339        dev_dbg(hdev->dev, "source == 0x%llx\n",
3340                le64_to_cpu(user_dma_pkt->src_addr));
3341        dev_dbg(hdev->dev, "destination == 0x%llx\n",
3342                le64_to_cpu(user_dma_pkt->dst_addr));
3343        dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3344
3345        ctl = le32_to_cpu(user_dma_pkt->ctl);
3346        user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3347                        GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3348
3349        /*
3350         * Special handling for DMA with size 0. The H/W has a bug where
3351         * this can cause the QMAN DMA to get stuck, so block it here.
3352         */
3353        if (user_dma_pkt->tsize == 0) {
3354                dev_err(hdev->dev,
3355                        "Got DMA with size 0, might reset the device\n");
3356                return -EINVAL;
3357        }
3358
3359        if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3360                rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3361        else
3362                rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3363
3364        return rc;
3365}
3366
3367static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3368                                struct hl_cs_parser *parser,
3369                                struct packet_lin_dma *user_dma_pkt)
3370{
3371        dev_dbg(hdev->dev, "DMA packet details:\n");
3372        dev_dbg(hdev->dev, "source == 0x%llx\n",
3373                le64_to_cpu(user_dma_pkt->src_addr));
3374        dev_dbg(hdev->dev, "destination == 0x%llx\n",
3375                le64_to_cpu(user_dma_pkt->dst_addr));
3376        dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3377
3378        /*
3379         * WA for HW-23.
3380         * We can't allow user to read from Host using QMANs other than 1.
3381         */
3382        if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3383                hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3384                                le32_to_cpu(user_dma_pkt->tsize),
3385                                hdev->asic_prop.va_space_host_start_address,
3386                                hdev->asic_prop.va_space_host_end_address)) {
3387                dev_err(hdev->dev,
3388                        "Can't DMA from host on queue other then 1\n");
3389                return -EFAULT;
3390        }
3391
3392        if (user_dma_pkt->tsize == 0) {
3393                dev_err(hdev->dev,
3394                        "Got DMA with size 0, might reset the device\n");
3395                return -EINVAL;
3396        }
3397
3398        parser->patched_cb_size += sizeof(*user_dma_pkt);
3399
3400        return 0;
3401}
3402
3403static int goya_validate_wreg32(struct hl_device *hdev,
3404                                struct hl_cs_parser *parser,
3405                                struct packet_wreg32 *wreg_pkt)
3406{
3407        struct goya_device *goya = hdev->asic_specific;
3408        u32 sob_start_addr, sob_end_addr;
3409        u16 reg_offset;
3410
3411        reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3412                        GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3413
3414        dev_dbg(hdev->dev, "WREG32 packet details:\n");
3415        dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3416        dev_dbg(hdev->dev, "value      == 0x%x\n",
3417                le32_to_cpu(wreg_pkt->value));
3418
3419        if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3420                dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3421                        reg_offset);
3422                return -EPERM;
3423        }
3424
3425        /*
3426         * With MMU, DMA channels are not secured, so it doesn't matter where
3427         * the WR COMP will be written to because it will go out with
3428         * non-secured property
3429         */
3430        if (goya->hw_cap_initialized & HW_CAP_MMU)
3431                return 0;
3432
3433        sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3434        sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3435
3436        if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3437                        (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3438
3439                dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3440                        wreg_pkt->value);
3441                return -EPERM;
3442        }
3443
3444        return 0;
3445}
3446
3447static int goya_validate_cb(struct hl_device *hdev,
3448                        struct hl_cs_parser *parser, bool is_mmu)
3449{
3450        u32 cb_parsed_length = 0;
3451        int rc = 0;
3452
3453        parser->patched_cb_size = 0;
3454
3455        /* cb_user_size is more than 0 so loop will always be executed */
3456        while (cb_parsed_length < parser->user_cb_size) {
3457                enum packet_id pkt_id;
3458                u16 pkt_size;
3459                struct goya_packet *user_pkt;
3460
3461                user_pkt = (struct goya_packet *) (uintptr_t)
3462                        (parser->user_cb->kernel_address + cb_parsed_length);
3463
3464                pkt_id = (enum packet_id) (
3465                                (le64_to_cpu(user_pkt->header) &
3466                                PACKET_HEADER_PACKET_ID_MASK) >>
3467                                        PACKET_HEADER_PACKET_ID_SHIFT);
3468
3469                pkt_size = goya_packet_sizes[pkt_id];
3470                cb_parsed_length += pkt_size;
3471                if (cb_parsed_length > parser->user_cb_size) {
3472                        dev_err(hdev->dev,
3473                                "packet 0x%x is out of CB boundary\n", pkt_id);
3474                        rc = -EINVAL;
3475                        break;
3476                }
3477
3478                switch (pkt_id) {
3479                case PACKET_WREG_32:
3480                        /*
3481                         * Although it is validated after copy in patch_cb(),
3482                         * need to validate here as well because patch_cb() is
3483                         * not called in MMU path while this function is called
3484                         */
3485                        rc = goya_validate_wreg32(hdev,
3486                                parser, (struct packet_wreg32 *) user_pkt);
3487                        break;
3488
3489                case PACKET_WREG_BULK:
3490                        dev_err(hdev->dev,
3491                                "User not allowed to use WREG_BULK\n");
3492                        rc = -EPERM;
3493                        break;
3494
3495                case PACKET_MSG_PROT:
3496                        dev_err(hdev->dev,
3497                                "User not allowed to use MSG_PROT\n");
3498                        rc = -EPERM;
3499                        break;
3500
3501                case PACKET_CP_DMA:
3502                        dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3503                        rc = -EPERM;
3504                        break;
3505
3506                case PACKET_STOP:
3507                        dev_err(hdev->dev, "User not allowed to use STOP\n");
3508                        rc = -EPERM;
3509                        break;
3510
3511                case PACKET_LIN_DMA:
3512                        if (is_mmu)
3513                                rc = goya_validate_dma_pkt_mmu(hdev, parser,
3514                                        (struct packet_lin_dma *) user_pkt);
3515                        else
3516                                rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3517                                        (struct packet_lin_dma *) user_pkt);
3518                        break;
3519
3520                case PACKET_MSG_LONG:
3521                case PACKET_MSG_SHORT:
3522                case PACKET_FENCE:
3523                case PACKET_NOP:
3524                        parser->patched_cb_size += pkt_size;
3525                        break;
3526
3527                default:
3528                        dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3529                                pkt_id);
3530                        rc = -EINVAL;
3531                        break;
3532                }
3533
3534                if (rc)
3535                        break;
3536        }
3537
3538        /*
3539         * The new CB should have space at the end for two MSG_PROT packets:
3540         * 1. A packet that will act as a completion packet
3541         * 2. A packet that will generate MSI-X interrupt
3542         */
3543        parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3544
3545        return rc;
3546}
3547
3548static int goya_patch_dma_packet(struct hl_device *hdev,
3549                                struct hl_cs_parser *parser,
3550                                struct packet_lin_dma *user_dma_pkt,
3551                                struct packet_lin_dma *new_dma_pkt,
3552                                u32 *new_dma_pkt_size)
3553{
3554        struct hl_userptr *userptr;
3555        struct scatterlist *sg, *sg_next_iter;
3556        u32 count, dma_desc_cnt;
3557        u64 len, len_next;
3558        dma_addr_t dma_addr, dma_addr_next;
3559        enum goya_dma_direction user_dir;
3560        u64 device_memory_addr, addr;
3561        enum dma_data_direction dir;
3562        struct sg_table *sgt;
3563        bool skip_host_mem_pin = false;
3564        bool user_memset;
3565        u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3566
3567        ctl = le32_to_cpu(user_dma_pkt->ctl);
3568
3569        user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3570                        GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3571
3572        user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3573                        GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3574
3575        if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3576                        (user_dma_pkt->tsize == 0)) {
3577                memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3578                *new_dma_pkt_size = sizeof(*new_dma_pkt);
3579                return 0;
3580        }
3581
3582        if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3583                addr = le64_to_cpu(user_dma_pkt->src_addr);
3584                device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3585                dir = DMA_TO_DEVICE;
3586                if (user_memset)
3587                        skip_host_mem_pin = true;
3588        } else {
3589                addr = le64_to_cpu(user_dma_pkt->dst_addr);
3590                device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3591                dir = DMA_FROM_DEVICE;
3592        }
3593
3594        if ((!skip_host_mem_pin) &&
3595                (hl_userptr_is_pinned(hdev, addr,
3596                        le32_to_cpu(user_dma_pkt->tsize),
3597                        parser->job_userptr_list, &userptr) == false)) {
3598                dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3599                                addr, user_dma_pkt->tsize);
3600                return -EFAULT;
3601        }
3602
3603        if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3604                memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3605                *new_dma_pkt_size = sizeof(*user_dma_pkt);
3606                return 0;
3607        }
3608
3609        user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3610
3611        user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3612
3613        sgt = userptr->sgt;
3614        dma_desc_cnt = 0;
3615
3616        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3617                len = sg_dma_len(sg);
3618                dma_addr = sg_dma_address(sg);
3619
3620                if (len == 0)
3621                        break;
3622
3623                while ((count + 1) < sgt->nents) {
3624                        sg_next_iter = sg_next(sg);
3625                        len_next = sg_dma_len(sg_next_iter);
3626                        dma_addr_next = sg_dma_address(sg_next_iter);
3627
3628                        if (len_next == 0)
3629                                break;
3630
3631                        if ((dma_addr + len == dma_addr_next) &&
3632                                (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3633                                len += len_next;
3634                                count++;
3635                                sg = sg_next_iter;
3636                        } else {
3637                                break;
3638                        }
3639                }
3640
3641                ctl = le32_to_cpu(user_dma_pkt->ctl);
3642                if (likely(dma_desc_cnt))
3643                        ctl &= ~GOYA_PKT_CTL_EB_MASK;
3644                ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3645                                GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3646                new_dma_pkt->ctl = cpu_to_le32(ctl);
3647                new_dma_pkt->tsize = cpu_to_le32((u32) len);
3648
3649                if (dir == DMA_TO_DEVICE) {
3650                        new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3651                        new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3652                } else {
3653                        new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3654                        new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3655                }
3656
3657                if (!user_memset)
3658                        device_memory_addr += len;
3659                dma_desc_cnt++;
3660                new_dma_pkt++;
3661        }
3662
3663        if (!dma_desc_cnt) {
3664                dev_err(hdev->dev,
3665                        "Error of 0 SG entries when patching DMA packet\n");
3666                return -EFAULT;
3667        }
3668
3669        /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3670        new_dma_pkt--;
3671        new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3672
3673        *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3674
3675        return 0;
3676}
3677
3678static int goya_patch_cb(struct hl_device *hdev,
3679                                struct hl_cs_parser *parser)
3680{
3681        u32 cb_parsed_length = 0;
3682        u32 cb_patched_cur_length = 0;
3683        int rc = 0;
3684
3685        /* cb_user_size is more than 0 so loop will always be executed */
3686        while (cb_parsed_length < parser->user_cb_size) {
3687                enum packet_id pkt_id;
3688                u16 pkt_size;
3689                u32 new_pkt_size = 0;
3690                struct goya_packet *user_pkt, *kernel_pkt;
3691
3692                user_pkt = (struct goya_packet *) (uintptr_t)
3693                        (parser->user_cb->kernel_address + cb_parsed_length);
3694                kernel_pkt = (struct goya_packet *) (uintptr_t)
3695                        (parser->patched_cb->kernel_address +
3696                                        cb_patched_cur_length);
3697
3698                pkt_id = (enum packet_id) (
3699                                (le64_to_cpu(user_pkt->header) &
3700                                PACKET_HEADER_PACKET_ID_MASK) >>
3701                                        PACKET_HEADER_PACKET_ID_SHIFT);
3702
3703                pkt_size = goya_packet_sizes[pkt_id];
3704                cb_parsed_length += pkt_size;
3705                if (cb_parsed_length > parser->user_cb_size) {
3706                        dev_err(hdev->dev,
3707                                "packet 0x%x is out of CB boundary\n", pkt_id);
3708                        rc = -EINVAL;
3709                        break;
3710                }
3711
3712                switch (pkt_id) {
3713                case PACKET_LIN_DMA:
3714                        rc = goya_patch_dma_packet(hdev, parser,
3715                                        (struct packet_lin_dma *) user_pkt,
3716                                        (struct packet_lin_dma *) kernel_pkt,
3717                                        &new_pkt_size);
3718                        cb_patched_cur_length += new_pkt_size;
3719                        break;
3720
3721                case PACKET_WREG_32:
3722                        memcpy(kernel_pkt, user_pkt, pkt_size);
3723                        cb_patched_cur_length += pkt_size;
3724                        rc = goya_validate_wreg32(hdev, parser,
3725                                        (struct packet_wreg32 *) kernel_pkt);
3726                        break;
3727
3728                case PACKET_WREG_BULK:
3729                        dev_err(hdev->dev,
3730                                "User not allowed to use WREG_BULK\n");
3731                        rc = -EPERM;
3732                        break;
3733
3734                case PACKET_MSG_PROT:
3735                        dev_err(hdev->dev,
3736                                "User not allowed to use MSG_PROT\n");
3737                        rc = -EPERM;
3738                        break;
3739
3740                case PACKET_CP_DMA:
3741                        dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3742                        rc = -EPERM;
3743                        break;
3744
3745                case PACKET_STOP:
3746                        dev_err(hdev->dev, "User not allowed to use STOP\n");
3747                        rc = -EPERM;
3748                        break;
3749
3750                case PACKET_MSG_LONG:
3751                case PACKET_MSG_SHORT:
3752                case PACKET_FENCE:
3753                case PACKET_NOP:
3754                        memcpy(kernel_pkt, user_pkt, pkt_size);
3755                        cb_patched_cur_length += pkt_size;
3756                        break;
3757
3758                default:
3759                        dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3760                                pkt_id);
3761                        rc = -EINVAL;
3762                        break;
3763                }
3764
3765                if (rc)
3766                        break;
3767        }
3768
3769        return rc;
3770}
3771
/*
 * goya_parse_cb_mmu - parse a user CB when the device MMU is enabled
 *
 * Allocates a kernel-owned CB, copies the user CB into it, and validates
 * the copy. Validating the copy (rather than the user's buffer) prevents
 * the user from modifying the contents after they were checked.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int goya_parse_cb_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	u32 patched_cb_size;
	struct hl_cb *user_cb;
	int rc;

	/*
	 * The new CB should have space at the end for two MSG_PROT pkt:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size = parser->user_cb_size +
			sizeof(struct packet_msg_prot) * 2;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
				parser->patched_cb_size,
				&patched_cb_handle, HL_KERNEL_ASID_ID);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n",
			rc);
		return rc;
	}

	/* Handle is page-aligned; the CB manager keys off the page index */
	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
	if (!parser->patched_cb) {
		rc = -EFAULT;
		goto out;
	}

	/*
	 * The check that parser->user_cb_size <= parser->user_cb->size was done
	 * in validate_queue_index().
	 */
	memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
		(void *) (uintptr_t) parser->user_cb->kernel_address,
		parser->user_cb_size);

	patched_cb_size = parser->patched_cb_size;

	/* validate patched CB instead of user CB */
	user_cb = parser->user_cb;
	parser->user_cb = parser->patched_cb;
	rc = goya_validate_cb(hdev, parser, true);
	parser->user_cb = user_cb;

	if (rc) {
		hl_cb_put(parser->patched_cb);
		goto out;
	}

	/*
	 * With MMU on, validation must not change the computed size; a
	 * mismatch means the CB contents changed under us.
	 */
	if (patched_cb_size != parser->patched_cb_size) {
		dev_err(hdev->dev, "user CB size mismatch\n");
		hl_cb_put(parser->patched_cb);
		rc = -EINVAL;
		goto out;
	}

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job will be completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
					patched_cb_handle << PAGE_SHIFT);

	return rc;
}
3850
/*
 * goya_parse_cb_no_mmu - parse a user CB when the device MMU is disabled
 *
 * First validates the user CB (which also computes the patched CB size),
 * then allocates a kernel-owned CB and patches the user packets into it.
 * On any failure, the job's pinned userptr list is released.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int goya_parse_cb_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	int rc;

	rc = goya_validate_cb(hdev, parser, false);

	if (rc)
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
				parser->patched_cb_size,
				&patched_cb_handle, HL_KERNEL_ASID_ID);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n", rc);
		goto free_userptr;
	}

	/* Handle is page-aligned; the CB manager keys off the page index */
	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
	if (!parser->patched_cb) {
		rc = -EFAULT;
		goto out;
	}

	rc = goya_patch_cb(hdev, parser);

	if (rc)
		hl_cb_put(parser->patched_cb);

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job will be completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
				patched_cb_handle << PAGE_SHIFT);

free_userptr:
	if (rc)
		hl_userptr_delete_list(hdev, parser->job_userptr_list);
	return rc;
}
3902
3903static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
3904                                        struct hl_cs_parser *parser)
3905{
3906        struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
3907        struct goya_device *goya = hdev->asic_specific;
3908
3909        if (goya->hw_cap_initialized & HW_CAP_MMU)
3910                return 0;
3911
3912        /* For internal queue jobs, just check if CB address is valid */
3913        if (hl_mem_area_inside_range(
3914                        (u64) (uintptr_t) parser->user_cb,
3915                        parser->user_cb_size,
3916                        asic_prop->sram_user_base_address,
3917                        asic_prop->sram_end_address))
3918                return 0;
3919
3920        if (hl_mem_area_inside_range(
3921                        (u64) (uintptr_t) parser->user_cb,
3922                        parser->user_cb_size,
3923                        asic_prop->dram_user_base_address,
3924                        asic_prop->dram_end_address))
3925                return 0;
3926
3927        dev_err(hdev->dev,
3928                "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
3929                parser->user_cb, parser->user_cb_size);
3930
3931        return -EFAULT;
3932}
3933
3934int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
3935{
3936        struct goya_device *goya = hdev->asic_specific;
3937
3938        if (!parser->ext_queue)
3939                return goya_parse_cb_no_ext_queue(hdev, parser);
3940
3941        if (goya->hw_cap_initialized & HW_CAP_MMU)
3942                return goya_parse_cb_mmu(hdev, parser);
3943        else
3944                return goya_parse_cb_no_mmu(hdev, parser);
3945}
3946
3947void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
3948                                u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec)
3949{
3950        struct packet_msg_prot *cq_pkt;
3951        u32 tmp;
3952
3953        cq_pkt = (struct packet_msg_prot *) (uintptr_t)
3954                (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
3955
3956        tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3957                        (1 << GOYA_PKT_CTL_EB_SHIFT) |
3958                        (1 << GOYA_PKT_CTL_MB_SHIFT);
3959        cq_pkt->ctl = cpu_to_le32(tmp);
3960        cq_pkt->value = cpu_to_le32(cq_val);
3961        cq_pkt->addr = cpu_to_le64(cq_addr);
3962
3963        cq_pkt++;
3964
3965        tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3966                        (1 << GOYA_PKT_CTL_MB_SHIFT);
3967        cq_pkt->ctl = cpu_to_le32(tmp);
3968        cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
3969        cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
3970}
3971
/* Write the event queue consumer index register with @val */
void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
	WREG32(mmCPU_EQ_CI, val);
}
3976
/*
 * Intentionally empty - no restore action is performed on Goya.
 * NOTE(review): presumably kept as a required ASIC callback; confirm
 * against the common code that invokes it.
 */
void goya_restore_phase_topology(struct hl_device *hdev)
{

}
3981
3982static void goya_clear_sm_regs(struct hl_device *hdev)
3983{
3984        int i, num_of_sob_in_longs, num_of_mon_in_longs;
3985
3986        num_of_sob_in_longs =
3987                ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
3988
3989        num_of_mon_in_longs =
3990                ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
3991
3992        for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
3993                WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
3994
3995        for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
3996                WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
3997
3998        /* Flush all WREG to prevent race */
3999        i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4000}
4001
4002/*
4003 * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
4004 *                       address.
4005 *
4006 * @hdev:       pointer to hl_device structure
4007 * @addr:       device or host mapped address
4008 * @val:        returned value
4009 *
4010 * In case of DDR address that is not mapped into the default aperture that
4011 * the DDR bar exposes, the function will configure the iATU so that the DDR
4012 * bar will be positioned at a base address that allows reading from the
4013 * required address. Configuring the iATU during normal operation can
4014 * lead to undefined behavior and therefore, should be done with extreme care
4015 *
4016 */
4017static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4018{
4019        struct asic_fixed_properties *prop = &hdev->asic_prop;
4020        u64 ddr_bar_addr;
4021        int rc = 0;
4022
4023        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4024                *val = RREG32(addr - CFG_BASE);
4025
4026        } else if ((addr >= SRAM_BASE_ADDR) &&
4027                        (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4028
4029                *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4030                                (addr - SRAM_BASE_ADDR));
4031
4032        } else if ((addr >= DRAM_PHYS_BASE) &&
4033                        (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4034
4035                u64 bar_base_addr = DRAM_PHYS_BASE +
4036                                (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4037
4038                ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4039                if (ddr_bar_addr != U64_MAX) {
4040                        *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4041                                                (addr - bar_base_addr));
4042
4043                        ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4044                                                        ddr_bar_addr);
4045                }
4046                if (ddr_bar_addr == U64_MAX)
4047                        rc = -EIO;
4048
4049        } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4050                *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
4051
4052        } else {
4053                rc = -EFAULT;
4054        }
4055
4056        return rc;
4057}
4058
4059/*
4060 * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
4061 *                        address.
4062 *
4063 * @hdev:       pointer to hl_device structure
4064 * @addr:       device or host mapped address
 * @val:        value to write
4066 *
4067 * In case of DDR address that is not mapped into the default aperture that
4068 * the DDR bar exposes, the function will configure the iATU so that the DDR
4069 * bar will be positioned at a base address that allows writing to the
4070 * required address. Configuring the iATU during normal operation can
4071 * lead to undefined behavior and therefore, should be done with extreme care
4072 *
4073 */
4074static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4075{
4076        struct asic_fixed_properties *prop = &hdev->asic_prop;
4077        u64 ddr_bar_addr;
4078        int rc = 0;
4079
4080        if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4081                WREG32(addr - CFG_BASE, val);
4082
4083        } else if ((addr >= SRAM_BASE_ADDR) &&
4084                        (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4085
4086                writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4087                                        (addr - SRAM_BASE_ADDR));
4088
4089        } else if ((addr >= DRAM_PHYS_BASE) &&
4090                        (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4091
4092                u64 bar_base_addr = DRAM_PHYS_BASE +
4093                                (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4094
4095                ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4096                if (ddr_bar_addr != U64_MAX) {
4097                        writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4098                                                (addr - bar_base_addr));
4099
4100                        ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4101                                                        ddr_bar_addr);
4102                }
4103                if (ddr_bar_addr == U64_MAX)
4104                        rc = -EIO;
4105
4106        } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4107                *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4108
4109        } else {
4110                rc = -EFAULT;
4111        }
4112
4113        return rc;
4114}
4115
4116static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4117{
4118        struct goya_device *goya = hdev->asic_specific;
4119
4120        if (hdev->hard_reset_pending)
4121                return U64_MAX;
4122
4123        return readq(hdev->pcie_bar[DDR_BAR_ID] +
4124                        (addr - goya->ddr_bar_cur_addr));
4125}
4126
4127static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4128{
4129        struct goya_device *goya = hdev->asic_specific;
4130
4131        if (hdev->hard_reset_pending)
4132                return;
4133
4134        writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4135                        (addr - goya->ddr_bar_cur_addr));
4136}
4137
/*
 * _goya_get_event_desc() - map an async event ID to a descriptor string.
 * @event_type: GOYA_ASYNC_EVENT_ID_* value received from the event queue.
 *
 * Entries for per-instance blocks (TPC, SRAM, PLL, DMA) contain a %d
 * placeholder which the caller (goya_get_event_desc) fills in with the
 * instance index via snprintf.  Unknown IDs map to "N/A".
 */
static const char *_goya_get_event_desc(u16 event_type)
{
	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
		return "PCIe_if";
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
		return "TPC%d_ecc";
	case GOYA_ASYNC_EVENT_ID_MME_ECC:
		return "MME_ecc";
	case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
		return "MME_ecc_ext";
	case GOYA_ASYNC_EVENT_ID_MMU_ECC:
		return "MMU_ecc";
	case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
		return "DMA_macro";
	case GOYA_ASYNC_EVENT_ID_DMA_ECC:
		return "DMA_ecc";
	case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
		return "CPU_if_ecc";
	case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
		return "PSOC_mem";
	case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
		return "PSOC_coresight";
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
		return "SRAM%d";
	case GOYA_ASYNC_EVENT_ID_GIC500:
		return "GIC500";
	case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
		return "PLL%d";
	case GOYA_ASYNC_EVENT_ID_AXI_ECC:
		return "AXI_ecc";
	case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
		return "L2_ram_ecc";
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
		return "PSOC_gpio_05_sw_reset";
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
		return "PSOC_gpio_10_vrhot_icrit";
	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
		return "PCIe_dec";
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
		return "TPC%d_dec";
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
		return "MME_wacs";
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
		return "MME_wacsd";
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
		return "CPU_axi_splitter";
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
		return "PSOC_axi_dec";
	case GOYA_ASYNC_EVENT_ID_PSOC:
		return "PSOC";
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
		return "TPC%d_krn_err";
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
		return "TPC%d_cq";
	case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
		return "TPC%d_qm";
	case GOYA_ASYNC_EVENT_ID_MME_QM:
		return "MME_qm";
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
		return "MME_cq";
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
		return "DMA%d_qm";
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		return "DMA%d_ch";
	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
		return "TPC%d_bmon_spmu";
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		return "DMA_bm_ch%d";
	default:
		return "N/A";
	}
}
4239
4240static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4241{
4242        u8 index;
4243
4244        switch (event_type) {
4245        case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4246        case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4247        case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4248        case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4249        case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4250        case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4251        case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4252        case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4253                index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4254                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4255                break;
4256        case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4257                index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4258                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4259                break;
4260        case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4261                index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4262                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4263                break;
4264        case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4265        case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4266        case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4267        case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4268        case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4269        case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4270        case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4271        case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4272                index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4273                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4274                break;
4275        case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4276        case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4277        case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4278        case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4279        case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4280        case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4281        case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4282        case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4283                index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4284                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4285                break;
4286        case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4287                index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4288                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4289                break;
4290        case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4291                index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4292                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4293                break;
4294        case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4295                index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4296                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4297                break;
4298        case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4299                index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4300                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4301                break;
4302        case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4303        case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4304        case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4305        case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4306        case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4307        case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4308        case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4309        case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4310                index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4311                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4312                break;
4313        case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4314                index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4315                snprintf(desc, size, _goya_get_event_desc(event_type), index);
4316                break;
4317        default:
4318                snprintf(desc, size, _goya_get_event_desc(event_type));
4319                break;
4320        }
4321}
4322
4323static void goya_print_razwi_info(struct hl_device *hdev)
4324{
4325        if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4326                dev_err(hdev->dev, "Illegal write to LBW\n");
4327                WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4328        }
4329
4330        if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4331                dev_err(hdev->dev, "Illegal read from LBW\n");
4332                WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4333        }
4334
4335        if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4336                dev_err(hdev->dev, "Illegal write to HBW\n");
4337                WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4338        }
4339
4340        if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4341                dev_err(hdev->dev, "Illegal read from HBW\n");
4342                WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4343        }
4344}
4345
/*
 * goya_print_mmu_error_info() - report a captured MMU page fault, if any.
 * @hdev: pointer to hl_device structure.
 *
 * Reads the MMU page-fault capture register; if the captured entry is
 * valid, reconstructs the faulting virtual address (bits 49:32 from the
 * capture register, bits 31:0 from the VA register), prints it and clears
 * the capture so the next fault can be latched.
 */
static void goya_print_mmu_error_info(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr;
	u32 val;

	/* nothing can be latched when the MMU was never brought up */
	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
	if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
		/* high VA bits live in the capture register itself */
		addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
		addr <<= 32;
		addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);

		dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);

		/* re-arm the capture for the next fault */
		WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
	}
}
4366
4367static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4368                                bool razwi)
4369{
4370        char desc[20] = "";
4371
4372        goya_get_event_desc(event_type, desc, sizeof(desc));
4373        dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4374                event_type, desc);
4375
4376        if (razwi) {
4377                goya_print_razwi_info(hdev);
4378                goya_print_mmu_error_info(hdev);
4379        }
4380}
4381
/*
 * goya_unmask_irq_arr() - ask ArmCP to re-enable a set of RAZWI interrupts.
 * @hdev: pointer to hl_device structure.
 * @irq_arr: array of GOYA_ASYNC_EVENT_ID_* values (cpu-endian).
 * @irq_arr_size: size of @irq_arr in bytes.
 *
 * Builds a variable-length ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY packet
 * carrying the (little-endian converted) event IDs and sends it to the
 * device CPU.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
		size_t irq_arr_size)
{
	struct armcp_unmask_irq_arr_packet *pkt;
	size_t total_pkt_size;
	long result;
	int rc;
	int irq_num_entries, irq_arr_index;
	__le32 *goya_irq_arr;

	total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
			irq_arr_size;

	/* data should be aligned to 8 bytes in order for ArmCP to copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is casted to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "too many elements in IRQ array\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
	pkt->length = cpu_to_le32(irq_num_entries);

	/* We must perform any necessary endianness conversion on the irq
	 * array being passed to the goya hardware
	 */
	for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
			irq_arr_index < irq_num_entries ; irq_arr_index++)
		goya_irq_arr[irq_arr_index] =
				cpu_to_le32(irq_arr[irq_arr_index]);

	pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
						ARMCP_PKT_CTL_OPCODE_SHIFT);

	rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
			HL_DEVICE_TIMEOUT_USEC, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask IRQ array\n");

	kfree(pkt);

	return rc;
}
4432
/*
 * goya_soft_reset_late_init() - late fixups after a soft reset.
 * @hdev: pointer to hl_device structure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int goya_soft_reset_late_init(struct hl_device *hdev)
{
	/*
	 * Unmask all IRQs since some could have been received
	 * during the soft reset
	 */
	return goya_unmask_irq_arr(hdev, goya_all_events,
					sizeof(goya_all_events));
}
4442
/*
 * goya_unmask_irq() - ask ArmCP to re-enable a single RAZWI interrupt.
 * @hdev: pointer to hl_device structure.
 * @event_type: GOYA_ASYNC_EVENT_ID_* value to unmask.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	struct armcp_packet pkt;
	long result;
	int rc;

	/* zero the whole packet (including padding) before sending to FW */
	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_DEVICE_TIMEOUT_USEC, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);

	return rc;
}
4463
4464void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4465{
4466        u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4467        u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4468                                >> EQ_CTL_EVENT_TYPE_SHIFT);
4469        struct goya_device *goya = hdev->asic_specific;
4470
4471        goya->events_stat[event_type]++;
4472        goya->events_stat_aggregate[event_type]++;
4473
4474        switch (event_type) {
4475        case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4476        case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4477        case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4478        case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4479        case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4480        case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4481        case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4482        case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4483        case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4484        case GOYA_ASYNC_EVENT_ID_MME_ECC:
4485        case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4486        case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4487        case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4488        case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4489        case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4490        case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4491        case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4492        case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4493        case GOYA_ASYNC_EVENT_ID_GIC500:
4494        case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4495        case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4496        case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4497        case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4498                goya_print_irq_info(hdev, event_type, false);
4499                hl_device_reset(hdev, true, false);
4500                break;
4501
4502        case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4503        case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4504        case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4505        case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4506        case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4507        case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4508        case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4509        case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4510        case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4511        case GOYA_ASYNC_EVENT_ID_MME_WACS:
4512        case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4513        case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4514        case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4515        case GOYA_ASYNC_EVENT_ID_PSOC:
4516        case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4517        case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4518        case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4519        case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4520        case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4521        case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4522        case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4523        case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4524        case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4525        case GOYA_ASYNC_EVENT_ID_MME_QM:
4526        case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4527        case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4528        case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4529                goya_print_irq_info(hdev, event_type, true);
4530                goya_unmask_irq(hdev, event_type);
4531                break;
4532
4533        case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4534        case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4535        case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4536        case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4537        case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4538        case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4539        case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4540        case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4541        case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4542        case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4543                goya_print_irq_info(hdev, event_type, false);
4544                goya_unmask_irq(hdev, event_type);
4545                break;
4546
4547        default:
4548                dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4549                                event_type);
4550                break;
4551        }
4552}
4553
4554void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
4555{
4556        struct goya_device *goya = hdev->asic_specific;
4557
4558        if (aggregate) {
4559                *size = (u32) sizeof(goya->events_stat_aggregate);
4560                return goya->events_stat_aggregate;
4561        }
4562
4563        *size = (u32) sizeof(goya->events_stat);
4564        return goya->events_stat;
4565}
4566
4567static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
4568                                u64 val, bool is_dram)
4569{
4570        struct packet_lin_dma *lin_dma_pkt;
4571        struct hl_cs_job *job;
4572        u32 cb_size, ctl;
4573        struct hl_cb *cb;
4574        int rc, lin_dma_pkts_cnt;
4575
4576        lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
4577        cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
4578                                                sizeof(struct packet_msg_prot);
4579        cb = hl_cb_kernel_create(hdev, cb_size);
4580        if (!cb)
4581                return -ENOMEM;
4582
4583        lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
4584
4585        do {
4586                memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4587
4588                ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4589                                (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4590                                (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4591                                (1 << GOYA_PKT_CTL_RB_SHIFT) |
4592                                (1 << GOYA_PKT_CTL_MB_SHIFT));
4593                ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4594                                GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4595                lin_dma_pkt->ctl = cpu_to_le32(ctl);
4596
4597                lin_dma_pkt->src_addr = cpu_to_le64(val);
4598                lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4599                if (lin_dma_pkts_cnt > 1)
4600                        lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
4601                else
4602                        lin_dma_pkt->tsize = cpu_to_le32(size);
4603
4604                size -= SZ_2G;
4605                addr += SZ_2G;
4606                lin_dma_pkt++;
4607        } while (--lin_dma_pkts_cnt);
4608
4609        job = hl_cs_allocate_job(hdev, true);
4610        if (!job) {
4611                dev_err(hdev->dev, "Failed to allocate a new job\n");
4612                rc = -ENOMEM;
4613                goto release_cb;
4614        }
4615
4616        job->id = 0;
4617        job->user_cb = cb;
4618        job->user_cb->cs_cnt++;
4619        job->user_cb_size = cb_size;
4620        job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4621        job->patched_cb = job->user_cb;
4622        job->job_cb_size = job->user_cb_size;
4623
4624        hl_debugfs_add_job(hdev, job);
4625
4626        rc = goya_send_job_on_qman0(hdev, job);
4627
4628        hl_cb_put(job->patched_cb);
4629
4630        hl_debugfs_remove_job(hdev, job);
4631        kfree(job);
4632        cb->cs_cnt--;
4633
4634release_cb:
4635        hl_cb_put(cb);
4636        hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4637
4638        return rc;
4639}
4640
/*
 * goya_context_switch() - prepare the device for a new user context.
 * @hdev: pointer to hl_device structure.
 * @asid: ASID of the incoming context.
 *
 * Scrubs the SRAM with a fixed pattern, restores the user-writable DMA
 * write-completion address registers to their expected sync-object
 * targets, programs the MMU registers with the new ASID and clears the
 * sync manager registers.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int goya_context_switch(struct hl_device *hdev, u32 asid)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 addr = prop->sram_base_address, sob_addr;
	/* on palladium, scrub only a small chunk to keep runtime reasonable */
	u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
	/* arbitrary, easily recognizable scrub pattern */
	u64 val = 0x7777777777777777ull;
	int rc, dma_id;
	u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
					mmDMA_CH_0_WR_COMP_ADDR_LO;

	rc = goya_memset_device_memory(hdev, addr, size, val, false);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
		return rc;
	}

	/* we need to reset registers that the user is allowed to change */
	sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));

	/* each remaining DMA channel completes into its own sync object */
	for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
							(dma_id - 1) * 4;
		WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
						lower_32_bits(sob_addr));
	}

	/* NOTE(review): restores the TPC PLL clock-relax register to what is
	 * presumably its default - confirm 0x200020 against the H/W spec
	 */
	WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);

	goya_mmu_prepare(hdev, asid);

	goya_clear_sm_regs(hdev);

	return 0;
}
4676
4677static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4678{
4679        struct asic_fixed_properties *prop = &hdev->asic_prop;
4680        struct goya_device *goya = hdev->asic_specific;
4681        u64 addr = prop->mmu_pgt_addr;
4682        u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4683                        MMU_CACHE_MNG_SIZE;
4684
4685        if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4686                return 0;
4687
4688        return goya_memset_device_memory(hdev, addr, size, 0, true);
4689}
4690
4691static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4692{
4693        struct goya_device *goya = hdev->asic_specific;
4694        u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4695        u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4696        u64 val = 0x9999999999999999ull;
4697
4698        if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4699                return 0;
4700
4701        return goya_memset_device_memory(hdev, addr, size, val, true);
4702}
4703
/*
 * goya_mmu_add_mappings_for_device_cpu() - create the MMU mappings the
 * device CPU needs.
 * @hdev: pointer to hl_device structure.
 *
 * Identity-maps the CPU F/W image region in DRAM, maps the CPU-accessible
 * DMA region at VA_CPU_ACCESSIBLE_MEM_ADDR (as a single 2MB page when
 * aligned, otherwise as 4KB pages) and then routes the CPU-IF AXI
 * transactions through the kernel ASID.  On failure everything mapped so
 * far is unmapped again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	s64 off, cpu_off;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	/* identity-map the CPU F/W image using 2MB pages */
	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
		rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
				prop->dram_base_address + off, PAGE_SIZE_2MB);
		if (rc) {
			dev_err(hdev->dev, "Map failed for address 0x%llx\n",
				prop->dram_base_address + off);
			goto unmap;
		}
	}

	/* single 2MB page when the DMA region is 2MB-aligned, else 4KB pages */
	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
		rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
			hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB);

		if (rc) {
			dev_err(hdev->dev,
				"Map failed for CPU accessible memory\n");
			/* the F/W loop completed, so rewind off to the last
			 * address that was actually mapped
			 */
			off -= PAGE_SIZE_2MB;
			goto unmap;
		}
	} else {
		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
			rc = hl_mmu_map(hdev->kernel_ctx,
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
				hdev->cpu_accessible_dma_address + cpu_off,
				PAGE_SIZE_4KB);
			if (rc) {
				dev_err(hdev->dev,
					"Map failed for CPU accessible memory\n");
				/* skip the page that just failed to map */
				cpu_off -= PAGE_SIZE_4KB;
				goto unmap_cpu;
			}
		}
	}

	/* route device-CPU transactions through the kernel ASID */
	goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
	goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);

	/* Make sure configuration is flushed to device */
	RREG32(mmCPU_IF_AWUSER_OVR_EN);

	goya->device_cpu_mmu_mappings_done = true;

	return 0;

unmap_cpu:
	/* unwind the 4KB CPU-accessible mappings created so far */
	for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
				PAGE_SIZE_4KB))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap address 0x%llx\n",
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
unmap:
	/* unwind the F/W image mappings from off downwards */
	for (; off >= 0 ; off -= PAGE_SIZE_2MB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				prop->dram_base_address + off, PAGE_SIZE_2MB))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap address 0x%llx\n",
				prop->dram_base_address + off);

	return rc;
}
4779
/*
 * goya_mmu_remove_device_cpu_mappings() - undo the mappings created by
 * goya_mmu_add_mappings_for_device_cpu().
 * @hdev: pointer to hl_device structure.
 *
 * No-op when the MMU was never brought up or the mappings were never
 * created.
 */
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u32 off, cpu_off;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (!goya->device_cpu_mmu_mappings_done)
		return;

	/* stop routing device-CPU transactions through the MMU first */
	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);

	/* mirror the 2MB/4KB page-size choice made when mapping */
	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
		if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
				PAGE_SIZE_2MB))
			dev_warn(hdev->dev,
				"Failed to unmap CPU accessible memory\n");
	} else {
		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
			if (hl_mmu_unmap(hdev->kernel_ctx,
					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
					PAGE_SIZE_4KB))
				dev_warn_ratelimited(hdev->dev,
					"failed to unmap address 0x%llx\n",
					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
	}

	/* remove the identity mappings of the CPU F/W image */
	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				prop->dram_base_address + off, PAGE_SIZE_2MB))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap address 0x%llx\n",
					prop->dram_base_address + off);

	goya->device_cpu_mmu_mappings_done = false;
}
4819
4820static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4821{
4822        struct goya_device *goya = hdev->asic_specific;
4823        int i;
4824
4825        if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4826                return;
4827
4828        if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4829                WARN(1, "asid %u is too big\n", asid);
4830                return;
4831        }
4832
4833        /* zero the MMBP and ASID bits and then set the ASID */
4834        for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
4835                goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
4836}
4837
/*
 * goya_mmu_invalidate_cache - invalidate the entire MMU STLB cache
 *
 * @hdev: pointer to hl_device structure
 * @is_hard: request a full (L0 & L1) invalidation; soft requests are ignored
 *           because Goya has no L1-only invalidation
 *
 * Kicks the full-invalidation register and polls until the H/W clears it,
 * serialized against other invalidations by mmu_cache_lock. A timeout is
 * only reported (ratelimited notice) - the caller can't do anything about it.
 */
static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need in L1 only invalidation in Goya */
	if (!is_hard)
		return;

	/* Palladium emulation is much slower than real H/W */
	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/* L0 & L1 invalidation */
	WREG32(mmSTLB_INV_ALL_START, 1);

	/* H/W clears the register when the invalidation completes */
	rc = hl_poll_timeout(
		hdev,
		mmSTLB_INV_ALL_START,
		status,
		!status,
		1000,
		timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
4875
/*
 * goya_mmu_invalidate_cache_range - invalidate MMU cache for a VA range
 *
 * @hdev: pointer to hl_device structure
 * @is_hard: request a full invalidation; soft requests are ignored because
 *           Goya has no L1-only invalidation
 * @asid: ASID of the range (currently unused - see TODO below)
 * @va: base virtual address of the range (currently unused)
 * @size: size of the range (currently unused)
 *
 * Advances the STLB invalidation producer index and polls the consumer index
 * until the H/W catches up, under mmu_cache_lock. A timeout is only reported
 * (ratelimited notice).
 */
static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
		bool is_hard, u32 asid, u64 va, u64 size)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec, inv_data, pi;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need in L1 only invalidation in Goya */
	if (!is_hard)
		return;

	/* Palladium emulation is much slower than real H/W */
	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/*
	 * TODO: currently invalidate entire L0 & L1 as in regular hard
	 * invalidation. Need to apply invalidation of specific cache lines with
	 * mask of ASID & VA & size.
	 * Note that L1 with be flushed entirely in any case.
	 */

	/* L0 & L1 invalidation */
	inv_data = RREG32(mmSTLB_CACHE_INV);
	/* PI is 8 bit */
	pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
	/* Keep the index mask bits, write the bumped producer index */
	WREG32(mmSTLB_CACHE_INV,
			(inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);

	/* Wait for the H/W consumer index to reach the new producer index */
	rc = hl_poll_timeout(
		hdev,
		mmSTLB_INV_CONSUMER_INDEX,
		status,
		status == pi,
		1000,
		timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
4925
4926int goya_send_heartbeat(struct hl_device *hdev)
4927{
4928        struct goya_device *goya = hdev->asic_specific;
4929
4930        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4931                return 0;
4932
4933        return hl_fw_send_heartbeat(hdev);
4934}
4935
4936int goya_armcp_info_get(struct hl_device *hdev)
4937{
4938        struct goya_device *goya = hdev->asic_specific;
4939        struct asic_fixed_properties *prop = &hdev->asic_prop;
4940        u64 dram_size;
4941        int rc;
4942
4943        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4944                return 0;
4945
4946        rc = hl_fw_armcp_info_get(hdev);
4947        if (rc)
4948                return rc;
4949
4950        dram_size = le64_to_cpu(prop->armcp_info.dram_size);
4951        if (dram_size) {
4952                if ((!is_power_of_2(dram_size)) ||
4953                                (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
4954                        dev_err(hdev->dev,
4955                                "F/W reported invalid DRAM size %llu. Trying to use default size\n",
4956                                dram_size);
4957                        dram_size = DRAM_PHYS_DEFAULT_SIZE;
4958                }
4959
4960                prop->dram_size = dram_size;
4961                prop->dram_end_address = prop->dram_base_address + dram_size;
4962        }
4963
4964        if (!strlen(prop->armcp_info.card_name))
4965                strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
4966                                CARD_NAME_MAX_LEN);
4967
4968        return 0;
4969}
4970
/*
 * goya_is_device_idle - check whether all Goya engines are idle
 *
 * @hdev: pointer to hl_device structure
 * @mask: optional out bitmask; a set bit (indexed by GOYA_ENGINE_ID_*) marks
 *        a busy engine
 * @s: optional seq_file; when given, a human-readable per-engine status table
 *     is printed (used by debugfs)
 *
 * Returns true only if every DMA channel, every TPC and the MME are idle
 * according to their QM/CMDQ/core status registers.
 */
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
				struct seq_file *s)
{
	const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
	const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
	u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
		mme_arch_sts;
	bool is_idle = true, is_eng_idle;
	u64 offset;
	int i;

	if (s)
		seq_puts(s, "\nDMA  is_idle  QM_GLBL_STS0  DMA_CORE_STS0\n"
				"---  -------  ------------  -------------\n");

	/* Register stride between consecutive DMA channel register banks */
	offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;

	for (i = 0 ; i < DMA_MAX_NUM ; i++) {
		qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
		dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
		/* Both the QMAN and the DMA core must report idle */
		is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
				IS_DMA_IDLE(dma_core_sts0);
		is_idle &= is_eng_idle;

		if (mask)
			*mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
		if (s)
			seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
					qm_glbl_sts0, dma_core_sts0);
	}

	if (s)
		seq_puts(s,
			"\nTPC  is_idle  QM_GLBL_STS0  CMDQ_GLBL_STS0  CFG_STATUS\n"
			"---  -------  ------------  --------------  ----------\n");

	/* Register stride between consecutive TPC register banks */
	offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
		cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
		tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
		/* QMAN, CMDQ and the TPC core itself must all be idle */
		is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
				IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
				IS_TPC_IDLE(tpc_cfg_sts);
		is_idle &= is_eng_idle;

		if (mask)
			*mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
		if (s)
			seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
				qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
	}

	if (s)
		seq_puts(s,
			"\nMME  is_idle  QM_GLBL_STS0  CMDQ_GLBL_STS0  ARCH_STATUS\n"
			"---  -------  ------------  --------------  -----------\n");

	/* Goya has a single MME engine */
	qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
	cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
	mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
	is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
			IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
			IS_MME_IDLE(mme_arch_sts);
	is_idle &= is_eng_idle;

	if (mask)
		*mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
	if (s) {
		seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
				cmdq_glbl_sts0, mme_arch_sts);
		seq_puts(s, "\n");
	}

	return is_idle;
}
5048
5049static void goya_hw_queues_lock(struct hl_device *hdev)
5050{
5051        struct goya_device *goya = hdev->asic_specific;
5052
5053        spin_lock(&goya->hw_queues_lock);
5054}
5055
5056static void goya_hw_queues_unlock(struct hl_device *hdev)
5057{
5058        struct goya_device *goya = hdev->asic_specific;
5059
5060        spin_unlock(&goya->hw_queues_lock);
5061}
5062
5063static u32 goya_get_pci_id(struct hl_device *hdev)
5064{
5065        return hdev->pdev->device;
5066}
5067
5068static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5069                                size_t max_size)
5070{
5071        struct goya_device *goya = hdev->asic_specific;
5072
5073        if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5074                return 0;
5075
5076        return hl_fw_get_eeprom_data(hdev, data, max_size);
5077}
5078
5079static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
5080{
5081        return RREG32(mmHW_STATE);
5082}
5083
/*
 * ASIC-specific function pointer table for Goya, registered on the device
 * via goya_set_asic_funcs(). The common habanalabs code calls the device
 * through this table only.
 */
static const struct hl_asic_funcs goya_funcs = {
	.early_init = goya_early_init,
	.early_fini = goya_early_fini,
	.late_init = goya_late_init,
	.late_fini = goya_late_fini,
	.sw_init = goya_sw_init,
	.sw_fini = goya_sw_fini,
	.hw_init = goya_hw_init,
	.hw_fini = goya_hw_fini,
	.halt_engines = goya_halt_engines,
	.suspend = goya_suspend,
	.resume = goya_resume,
	.cb_mmap = goya_cb_mmap,
	.ring_doorbell = goya_ring_doorbell,
	.pqe_write = goya_pqe_write,
	.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
	.asic_dma_free_coherent = goya_dma_free_coherent,
	.get_int_queue_base = goya_get_int_queue_base,
	.test_queues = goya_test_queues,
	.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
	.asic_dma_pool_free = goya_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = goya_dma_unmap_sg,
	.cs_parser = goya_cs_parser,
	.asic_dma_map_sg = goya_dma_map_sg,
	.get_dma_desc_list_size = goya_get_dma_desc_list_size,
	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
	.update_eq_ci = goya_update_eq_ci,
	.context_switch = goya_context_switch,
	.restore_phase_topology = goya_restore_phase_topology,
	.debugfs_read32 = goya_debugfs_read32,
	.debugfs_write32 = goya_debugfs_write32,
	.add_device_attr = goya_add_device_attr,
	.handle_eqe = goya_handle_eqe,
	.set_pll_profile = goya_set_pll_profile,
	.get_events_stat = goya_get_events_stat,
	.read_pte = goya_read_pte,
	.write_pte = goya_write_pte,
	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
	.send_heartbeat = goya_send_heartbeat,
	.debug_coresight = goya_debug_coresight,
	.is_device_idle = goya_is_device_idle,
	.soft_reset_late_init = goya_soft_reset_late_init,
	.hw_queues_lock = goya_hw_queues_lock,
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
	.send_cpu_message = goya_send_cpu_message,
	.get_hw_state = goya_get_hw_state,
	.pci_bars_map = goya_pci_bars_map,
	.set_dram_bar_base = goya_set_ddr_bar_base,
	.init_iatu = goya_init_iatu,
	.rreg = hl_rreg,
	.wreg = hl_wreg,
	.halt_coresight = goya_halt_coresight
};
5142
5143/*
5144 * goya_set_asic_funcs - set Goya function pointers
5145 *
5146 * @*hdev: pointer to hl_device structure
5147 *
5148 */
5149void goya_set_asic_funcs(struct hl_device *hdev)
5150{
5151        hdev->asic_funcs = &goya_funcs;
5152}
5153