linux/drivers/misc/habanalabs/common/habanalabs.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABSP_H_
#define HABANALABSP_H_

#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <uapi/misc/habanalabs.h>

#include <linux/cdev.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/debugfs.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/coresight.h>

#define HL_NAME                         "habanalabs"

/* Use upper bits of mmap offset to store habana driver specific information.
 * bits[63:61] - Encode mmap type
 * bits[45:0]  - mmap offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *  defines are w.r.t. PAGE_SIZE
 */
#define HL_MMAP_TYPE_SHIFT              (61 - PAGE_SHIFT)
#define HL_MMAP_TYPE_MASK               (0x7ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_BLOCK              (0x4ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB                 (0x2ull << HL_MMAP_TYPE_SHIFT)

#define HL_MMAP_OFFSET_VALUE_MASK       (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_GET(off)   (off & HL_MMAP_OFFSET_VALUE_MASK)

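/*
 * Example (illustration only, assuming the usual flow where user space
 * passes the handle it got from the driver as the mmap() byte offset):
 * the core mm shifts that offset right by PAGE_SHIFT into vm_pgoff,
 * which is why the type field lands at HL_MMAP_TYPE_SHIFT. A handler
 * can then decode it as:
 *
 *	u64 type = vma->vm_pgoff & HL_MMAP_TYPE_MASK;
 *	u64 off  = HL_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
 *
 *	if (type == HL_MMAP_TYPE_CB)
 *		... map the command buffer identified by 'off' ...
 *	else if (type == HL_MMAP_TYPE_BLOCK)
 *		... map the HW block identified by 'off' ...
 */
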
#define HL_PENDING_RESET_PER_SEC        10
#define HL_PENDING_RESET_MAX_TRIALS     60 /* 10 minutes */
#define HL_PENDING_RESET_LONG_SEC       60

#define HL_HARD_RESET_MAX_TIMEOUT       120
#define HL_PLDM_HARD_RESET_MAX_TIMEOUT  (HL_HARD_RESET_MAX_TIMEOUT * 3)

#define HL_DEVICE_TIMEOUT_USEC          1000000 /* 1 s */

#define HL_HEARTBEAT_PER_USEC           5000000 /* 5 s */

#define HL_PLL_LOW_JOB_FREQ_USEC        5000000 /* 5 s */

#define HL_CPUCP_INFO_TIMEOUT_USEC      10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC    10000000 /* 10s */

#define HL_PCI_ELBI_TIMEOUT_MSEC        10 /* 10ms */

#define HL_SIM_MAX_TIMEOUT_US           10000000 /* 10s */

#define HL_COMMON_USER_INTERRUPT_ID     0xFFF

/* Memory */
#define MEM_HASH_TABLE_BITS             7 /* 1 << 7 buckets */

/* MMU */
#define MMU_HASH_TABLE_BITS             7 /* 1 << 7 buckets */

/**
 * enum hl_mmu_page_table_location - mmu page table location
 * @MMU_DR_PGT: page-table is located on device DRAM.
 * @MMU_HR_PGT: page-table is located on host memory.
 * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
 */
enum hl_mmu_page_table_location {
        MMU_DR_PGT = 0,         /* device-dram-resident MMU PGT */
        MMU_HR_PGT,             /* host resident MMU PGT */
        MMU_NUM_PGT_LOCATIONS   /* num of PGT locations */
};

/*
 * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
 * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
 */
#define HL_RSVD_SOBS                    2
#define HL_RSVD_MONS                    1

/*
 * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
 */
#define HL_COLLECTIVE_RSVD_MSTR_MONS    2

#define HL_MAX_SOB_VAL                  (1 << 15)

#define IS_POWER_OF_2(n)                (n != 0 && ((n & (n - 1)) == 0))
#define IS_MAX_PENDING_CS_VALID(n)      (IS_POWER_OF_2(n) && (n > 1))

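/*
 * Example (sketch): an ASIC init path could validate its configured
 * value with these macros, e.g.
 *
 *	if (!IS_MAX_PENDING_CS_VALID(prop->max_pending_cs))
 *		return -EINVAL;
 *
 * 64 passes (a power of 2 that is greater than 1); 1 and 24 do not.
 */
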
#define HL_PCI_NUM_BARS                 6

#define HL_MAX_DCORES                   4

/*
 * Reset Flags
 *
 * - HL_RESET_HARD
 *       If set do hard reset to all engines. If not set reset just
 *       compute/DMA engines.
 *
 * - HL_RESET_FROM_RESET_THREAD
 *       Set if the caller is the hard-reset thread
 *
 * - HL_RESET_HEARTBEAT
 *       Set if reset is due to heartbeat
 *
 * - HL_RESET_TDR
 *       Set if reset is due to TDR
 *
 * - HL_RESET_DEVICE_RELEASE
 *       Set if reset is due to device release
 */
#define HL_RESET_HARD                   (1 << 0)
#define HL_RESET_FROM_RESET_THREAD      (1 << 1)
#define HL_RESET_HEARTBEAT              (1 << 2)
#define HL_RESET_TDR                    (1 << 3)
#define HL_RESET_DEVICE_RELEASE         (1 << 4)

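/*
 * Example (sketch; hl_device_reset() as the entry point taking these
 * flags is an assumption of this illustration): a failed heartbeat
 * would typically escalate to a hard reset with
 *
 *	hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);
 */
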
#define HL_MAX_SOBS_PER_MONITOR 8

/**
 * struct hl_gen_wait_properties - properties for generating a wait CB
 * @data: command buffer
 * @q_idx: queue id is used to extract fence register address
 * @size: offset in command buffer
 * @sob_base: SOB base to use in this wait CB
 * @sob_val: SOB value to wait for
 * @mon_id: monitor to use in this wait CB
 * @sob_mask: each bit represents a SOB offset from sob_base to be used
 */
struct hl_gen_wait_properties {
        void    *data;
        u32     q_idx;
        u32     size;
        u16     sob_base;
        u16     sob_val;
        u16     mon_id;
        u8      sob_mask;
};

/**
 * struct pgt_info - MMU hop page info.
 * @node: hash linked-list node for the shadow hash of pgts.
 * @phys_addr: physical address of the pgt.
 * @shadow_addr: shadow hop in the host.
 * @ctx: pointer to the owner ctx.
 * @num_of_ptes: indicates how many ptes are used in the pgt.
 *
 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
 * is needed during mapping, a new page is allocated and this structure holds
 * its essential information. During unmapping, if no valid PTEs remain in the
 * page, it is freed along with its pgt_info structure.
 */
struct pgt_info {
        struct hlist_node       node;
        u64                     phys_addr;
        u64                     shadow_addr;
        struct hl_ctx           *ctx;
        int                     num_of_ptes;
};

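/*
 * Example (sketch, assuming the per-context shadow hash described at
 * hl_ctx.mmu_shadow_hash further down this file): a new hop page is
 * keyed by its shadow address and looked up with the standard
 * <linux/hashtable.h> helpers:
 *
 *	hash_add(ctx->mmu_shadow_hash, &pgt->node, pgt->shadow_addr);
 *
 *	hash_for_each_possible(ctx->mmu_shadow_hash, pgt, node, shadow_addr)
 *		if (pgt->shadow_addr == shadow_addr)
 *			break;
 */
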
struct hl_device;
struct hl_fpriv;

/**
 * enum hl_pci_match_mode - pci match mode per region
 * @PCI_ADDRESS_MATCH_MODE: address match mode
 * @PCI_BAR_MATCH_MODE: bar match mode
 */
enum hl_pci_match_mode {
        PCI_ADDRESS_MATCH_MODE,
        PCI_BAR_MATCH_MODE
};

/**
 * enum hl_fw_component - F/W components to read version through registers.
 * @FW_COMP_BOOT_FIT: boot fit.
 * @FW_COMP_PREBOOT: preboot.
 * @FW_COMP_LINUX: linux.
 */
enum hl_fw_component {
        FW_COMP_BOOT_FIT,
        FW_COMP_PREBOOT,
        FW_COMP_LINUX,
};

/**
 * enum hl_fw_types - F/W types present in the system
 * @FW_TYPE_LINUX: Linux image for device CPU
 * @FW_TYPE_BOOT_CPU: Boot image for device CPU
 * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
 *                       (preboot, ppboot, etc.)
 * @FW_TYPE_ALL_TYPES: Mask for all types
 */
enum hl_fw_types {
        FW_TYPE_LINUX = 0x1,
        FW_TYPE_BOOT_CPU = 0x2,
        FW_TYPE_PREBOOT_CPU = 0x4,
        FW_TYPE_ALL_TYPES =
                (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
};

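/*
 * Example (illustration only): the values form a bitmask, so loaded
 * components can be tested and combined freely:
 *
 *	u32 loaded = FW_TYPE_PREBOOT_CPU | FW_TYPE_BOOT_CPU;
 *
 *	if ((loaded & FW_TYPE_ALL_TYPES) == FW_TYPE_ALL_TYPES)
 *		... every component, including the Linux image, is up ...
 */
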
/**
 * enum hl_queue_type - Supported QUEUE types.
 * @QUEUE_TYPE_NA: queue is not available.
 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
 *                  host.
 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
 *                  memories and/or operates the compute engines.
 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
 * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
 *                 notifications are sent by H/W.
 */
enum hl_queue_type {
        QUEUE_TYPE_NA,
        QUEUE_TYPE_EXT,
        QUEUE_TYPE_INT,
        QUEUE_TYPE_CPU,
        QUEUE_TYPE_HW
};

enum hl_cs_type {
        CS_TYPE_DEFAULT,
        CS_TYPE_SIGNAL,
        CS_TYPE_WAIT,
        CS_TYPE_COLLECTIVE_WAIT
};

/*
 * struct hl_inbound_pci_region - inbound region descriptor
 * @mode: pci match mode for this region
 * @addr: region target address
 * @size: region size in bytes
 * @offset_in_bar: offset within bar (address match mode)
 * @bar: bar id
 */
struct hl_inbound_pci_region {
        enum hl_pci_match_mode  mode;
        u64                     addr;
        u64                     size;
        u64                     offset_in_bar;
        u8                      bar;
};

/*
 * struct hl_outbound_pci_region - outbound region descriptor
 * @addr: region target address
 * @size: region size in bytes
 */
struct hl_outbound_pci_region {
        u64     addr;
        u64     size;
};

/*
 * enum queue_cb_alloc_flags - Indicates whether a queue supports CBs that
 * are allocated by the Kernel or by the User
 * @CB_ALLOC_KERNEL: supports only CBs allocated by the Kernel
 * @CB_ALLOC_USER: supports only CBs allocated by the User
 */
enum queue_cb_alloc_flags {
        CB_ALLOC_KERNEL = 0x1,
        CB_ALLOC_USER   = 0x2
};

/*
 * struct hl_hw_sob - H/W SOB info.
 * @hdev: habanalabs device structure.
 * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
 * @sob_id: id of this SOB.
 * @q_idx: the H/W queue that uses this SOB.
 */
struct hl_hw_sob {
        struct hl_device        *hdev;
        struct kref             kref;
        u32                     sob_id;
        u32                     q_idx;
};

enum hl_collective_mode {
        HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
        HL_COLLECTIVE_MASTER = 0x1,
        HL_COLLECTIVE_SLAVE = 0x2
};

/**
 * struct hw_queue_properties - queue information.
 * @type: queue type.
 * @cb_alloc_flags: bitmap which indicates if the hw queue supports CBs
 *                  allocated by the Kernel driver and therefore,
 *                  a CB handle can be provided for jobs on this queue.
 *                  Otherwise, a CB address must be provided.
 * @collective_mode: collective mode of current queue
 * @driver_only: true if only the driver is allowed to send a job to this queue,
 *               false otherwise.
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hw_queue_properties {
        enum hl_queue_type      type;
        enum queue_cb_alloc_flags cb_alloc_flags;
        enum hl_collective_mode collective_mode;
        u8                      driver_only;
        u8                      supports_sync_stream;
};

/**
 * enum vm_type_t - virtual memory mapping request information.
 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
 */
enum vm_type_t {
        VM_TYPE_USERPTR = 0x1,
        VM_TYPE_PHYS_PACK = 0x2
};

/**
 * enum hl_device_hw_state - H/W device state. use this to understand whether
 *                           to do reset before hw_init or not
 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
 *                            hw_init
 */
enum hl_device_hw_state {
        HL_DEVICE_HW_STATE_CLEAN = 0,
        HL_DEVICE_HW_STATE_DIRTY
};

#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0

/**
 * struct hl_mmu_properties - ASIC specific MMU address translation properties.
 * @start_addr: virtual start address of the memory region.
 * @end_addr: virtual end address of the memory region.
 * @hop0_shift: shift of hop 0 mask.
 * @hop1_shift: shift of hop 1 mask.
 * @hop2_shift: shift of hop 2 mask.
 * @hop3_shift: shift of hop 3 mask.
 * @hop4_shift: shift of hop 4 mask.
 * @hop5_shift: shift of hop 5 mask.
 * @hop0_mask: mask to get the PTE address in hop 0.
 * @hop1_mask: mask to get the PTE address in hop 1.
 * @hop2_mask: mask to get the PTE address in hop 2.
 * @hop3_mask: mask to get the PTE address in hop 3.
 * @hop4_mask: mask to get the PTE address in hop 4.
 * @hop5_mask: mask to get the PTE address in hop 5.
 * @page_size: default page size used to allocate memory.
 * @num_hops: the number of hops supported by the translation table.
 * @host_resident: Should the MMU page table reside in host memory or in the
 *                 device DRAM.
 */
struct hl_mmu_properties {
        u64     start_addr;
        u64     end_addr;
        u64     hop0_shift;
        u64     hop1_shift;
        u64     hop2_shift;
        u64     hop3_shift;
        u64     hop4_shift;
        u64     hop5_shift;
        u64     hop0_mask;
        u64     hop1_mask;
        u64     hop2_mask;
        u64     hop3_mask;
        u64     hop4_mask;
        u64     hop5_mask;
        u32     page_size;
        u32     num_hops;
        u8      host_resident;
};

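/*
 * Example (sketch): each shift/mask pair selects one hop's PTE index
 * out of a virtual address, so the address of the hop-0 PTE inside a
 * hop table located at 'hop_addr' would be
 *
 *	hop_addr + pte_size * ((virt_addr & mmu_prop->hop0_mask) >>
 *				mmu_prop->hop0_shift)
 *
 * and likewise for hops 1..5 with their own masks and shifts.
 */
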
/**
 * struct asic_fixed_properties - ASIC specific immutable properties.
 * @hw_queues_props: H/W queues properties.
 * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
 *              available sensors.
 * @uboot_ver: F/W U-boot version.
 * @preboot_ver: F/W Preboot version.
 * @dmmu: DRAM MMU address translation properties.
 * @pmmu: PCI (host) MMU address translation properties.
 * @pmmu_huge: PCI (host) MMU address translation properties for memory
 *              allocated with huge pages.
 * @sram_base_address: SRAM physical start address.
 * @sram_end_address: SRAM physical end address.
 * @sram_user_base_address: SRAM physical start address for user access.
 * @dram_base_address: DRAM physical start address.
 * @dram_end_address: DRAM physical end address.
 * @dram_user_base_address: DRAM physical start address for user access.
 * @dram_size: DRAM total size.
 * @dram_pci_bar_size: size of PCI bar towards DRAM.
 * @max_power_default: max power of the device after reset.
 * @dc_power_default: power consumed by the device in idle mode.
 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
 *                                      fault.
 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
 * @mmu_dram_default_page_addr: DRAM default page physical address.
 * @cb_va_start_addr: virtual start address of command buffers which are mapped
 *                    to the device's MMU.
 * @cb_va_end_addr: virtual end address of command buffers which are mapped to
 *                  the device's MMU.
 * @mmu_pgt_size: MMU page tables total size.
 * @mmu_pte_size: PTE size in MMU page tables.
 * @mmu_hop_table_size: MMU hop table size.
 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
 * @dram_page_size: page size for MMU DRAM allocation.
 * @cfg_size: configuration space size on SRAM.
 * @sram_size: total size of SRAM.
 * @max_asid: maximum number of open contexts (ASIDs).
 * @num_of_events: number of possible internal H/W IRQs.
 * @psoc_pci_pll_nr: PCI PLL NR value.
 * @psoc_pci_pll_nf: PCI PLL NF value.
 * @psoc_pci_pll_od: PCI PLL OD value.
 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
 * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
 * @high_pll: high PLL frequency used by the device.
 * @cb_pool_cb_cnt: number of CBs in the CB pool.
 * @cb_pool_cb_size: size of each CB in the CB pool.
 * @max_pending_cs: maximum number of concurrent pending command submissions
 * @max_queues: maximum number of queues in the system
 * @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
 *                                capabilities reported by FW, bit description
 *                                can be found in CPU_BOOT_DEV_STS0
 * @fw_preboot_cpu_boot_dev_sts1: bitmap representation of preboot cpu
 *                                capabilities reported by FW, bit description
 *                                can be found in CPU_BOOT_DEV_STS1
 * @fw_bootfit_cpu_boot_dev_sts0: bitmap representation of boot cpu security
 *                                status reported by FW, bit description can be
 *                                found in CPU_BOOT_DEV_STS0
 * @fw_bootfit_cpu_boot_dev_sts1: bitmap representation of boot cpu security
 *                                status reported by FW, bit description can be
 *                                found in CPU_BOOT_DEV_STS1
 * @fw_app_cpu_boot_dev_sts0: bitmap representation of application security
 *                            status reported by FW, bit description can be
 *                            found in CPU_BOOT_DEV_STS0
 * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
 *                            status reported by FW, bit description can be
 *                            found in CPU_BOOT_DEV_STS1
 * @collective_first_sob: first sync object available for collective use
 * @collective_first_mon: first monitor available for collective use
 * @sync_stream_first_sob: first sync object available for sync stream use
 * @sync_stream_first_mon: first monitor available for sync stream use
 * @first_available_user_sob: first sob available for the user
 * @first_available_user_mon: first monitor available for the user
 * @first_available_user_msix_interrupt: first available msix interrupt
 *                                       reserved for the user
 * @first_available_cq: first available CQ for the user.
 * @user_interrupt_count: number of user interrupts.
 * @tpc_enabled_mask: which TPCs are enabled.
 * @completion_queues_count: number of completion queues.
 * @fw_security_enabled: true if security measures are enabled in firmware,
 *                       false otherwise
 * @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
 *                              BOOT_DEV_STS0
 * @fw_cpu_boot_dev_sts1_valid: status bits are valid and can be fetched from
 *                              BOOT_DEV_STS1
 * @dram_supports_virtual_memory: is there an MMU towards the DRAM
 * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
 * @num_functional_hbms: number of functional HBMs in each DCORE.
 * @iatu_done_by_fw: true if iATU configuration is being done by FW.
 * @dynamic_fw_load: true if dynamic FW load is supported.
 * @gic_interrupts_enable: true if FW is not blocking GIC controller,
 *                         false otherwise.
 */
struct asic_fixed_properties {
        struct hw_queue_properties      *hw_queues_props;
        struct cpucp_info               cpucp_info;
        char                            uboot_ver[VERSION_MAX_LEN];
        char                            preboot_ver[VERSION_MAX_LEN];
        struct hl_mmu_properties        dmmu;
        struct hl_mmu_properties        pmmu;
        struct hl_mmu_properties        pmmu_huge;
        u64                             sram_base_address;
        u64                             sram_end_address;
        u64                             sram_user_base_address;
        u64                             dram_base_address;
        u64                             dram_end_address;
        u64                             dram_user_base_address;
        u64                             dram_size;
        u64                             dram_pci_bar_size;
        u64                             max_power_default;
        u64                             dc_power_default;
        u64                             dram_size_for_default_page_mapping;
        u64                             pcie_dbi_base_address;
        u64                             pcie_aux_dbi_reg_addr;
        u64                             mmu_pgt_addr;
        u64                             mmu_dram_default_page_addr;
        u64                             cb_va_start_addr;
        u64                             cb_va_end_addr;
        u32                             mmu_pgt_size;
        u32                             mmu_pte_size;
        u32                             mmu_hop_table_size;
        u32                             mmu_hop0_tables_total_size;
        u32                             dram_page_size;
        u32                             cfg_size;
        u32                             sram_size;
        u32                             max_asid;
        u32                             num_of_events;
        u32                             psoc_pci_pll_nr;
        u32                             psoc_pci_pll_nf;
        u32                             psoc_pci_pll_od;
        u32                             psoc_pci_pll_div_factor;
        u32                             psoc_timestamp_frequency;
        u32                             high_pll;
        u32                             cb_pool_cb_cnt;
        u32                             cb_pool_cb_size;
        u32                             max_pending_cs;
        u32                             max_queues;
        u32                             fw_preboot_cpu_boot_dev_sts0;
        u32                             fw_preboot_cpu_boot_dev_sts1;
        u32                             fw_bootfit_cpu_boot_dev_sts0;
        u32                             fw_bootfit_cpu_boot_dev_sts1;
        u32                             fw_app_cpu_boot_dev_sts0;
        u32                             fw_app_cpu_boot_dev_sts1;
        u16                             collective_first_sob;
        u16                             collective_first_mon;
        u16                             sync_stream_first_sob;
        u16                             sync_stream_first_mon;
        u16                             first_available_user_sob[HL_MAX_DCORES];
        u16                             first_available_user_mon[HL_MAX_DCORES];
        u16                             first_available_user_msix_interrupt;
        u16                             first_available_cq[HL_MAX_DCORES];
        u16                             user_interrupt_count;
        u8                              tpc_enabled_mask;
        u8                              completion_queues_count;
        u8                              fw_security_enabled;
        u8                              fw_cpu_boot_dev_sts0_valid;
        u8                              fw_cpu_boot_dev_sts1_valid;
        u8                              dram_supports_virtual_memory;
        u8                              hard_reset_done_by_fw;
        u8                              num_functional_hbms;
        u8                              iatu_done_by_fw;
        u8                              dynamic_fw_load;
        u8                              gic_interrupts_enable;
};

/**
 * struct hl_fence - software synchronization primitive
 * @completion: fence is implemented using completion
 * @refcount: refcount for this fence
 * @cs_sequence: sequence of the corresponding command submission
 * @error: mark this fence with error
 * @timestamp: timestamp upon completion
 */
struct hl_fence {
        struct completion       completion;
        struct kref             refcount;
        u64                     cs_sequence;
        int                     error;
        ktime_t                 timestamp;
};

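/*
 * Example (sketch; the real helpers live in the common code and the
 * grouping below is illustrative): the fields above imply this
 * lifecycle:
 *
 *	init:	kref_init(&fence->refcount);
 *		init_completion(&fence->completion);
 *		fence->cs_sequence = sequence;
 *
 *	signal:	fence->error = error;
 *		fence->timestamp = ktime_get();
 *		complete_all(&fence->completion);
 *
 *	wait:	wait_for_completion_interruptible_timeout(
 *			&fence->completion, timeout);
 */
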
/**
 * struct hl_cs_compl - command submission completion object.
 * @sob_reset_work: workqueue object to run SOB reset flow.
 * @base_fence: hl fence object.
 * @lock: spinlock to protect fence.
 * @hdev: habanalabs device structure.
 * @hw_sob: the H/W SOB used in this signal/wait CS.
 * @cs_seq: command submission sequence number.
 * @type: type of the CS - signal/wait.
 * @sob_val: the SOB value that is used in this signal/wait CS.
 * @sob_group: the SOB group that is used in this collective wait CS.
 */
struct hl_cs_compl {
        struct work_struct      sob_reset_work;
        struct hl_fence         base_fence;
        spinlock_t              lock;
        struct hl_device        *hdev;
        struct hl_hw_sob        *hw_sob;
        u64                     cs_seq;
        enum hl_cs_type         type;
        u16                     sob_val;
        u16                     sob_group;
};

/*
 * Command Buffers
 */

/**
 * struct hl_cb_mgr - describes a Command Buffer Manager.
 * @cb_lock: protects cb_handles.
 * @cb_handles: an idr to hold all command buffer handles.
 */
struct hl_cb_mgr {
        spinlock_t              cb_lock;
        struct idr              cb_handles; /* protected by cb_lock */
};

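/*
 * Example (sketch; the real allocation lives in the command buffer
 * code): handles come from the idr under cb_lock, e.g.
 *
 *	spin_lock(&mgr->cb_lock);
 *	handle = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
 *	spin_unlock(&mgr->cb_lock);
 */
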
/**
 * struct hl_cb - describes a Command Buffer.
 * @refcount: reference counter for usage of the CB.
 * @hdev: pointer to device this CB belongs to.
 * @ctx: pointer to the CB owner's context.
 * @lock: spinlock to protect mmap flows.
 * @debugfs_list: node in debugfs list of command buffers.
 * @pool_list: node in pool list of command buffers.
 * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
 *                 the device's MMU.
 * @id: the CB's ID.
 * @kernel_address: Holds the CB's kernel virtual address.
 * @bus_address: Holds the CB's DMA address.
 * @mmap_size: Holds the CB's size that was mmaped.
 * @size: holds the CB's size.
 * @cs_cnt: holds number of CS that this CB participates in.
 * @mmap: true if the CB is currently mmaped to user.
 * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated
 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
 */
struct hl_cb {
        struct kref             refcount;
        struct hl_device        *hdev;
        struct hl_ctx           *ctx;
        spinlock_t              lock;
        struct list_head        debugfs_list;
        struct list_head        pool_list;
        struct list_head        va_block_list;
        u64                     id;
        void                    *kernel_address;
        dma_addr_t              bus_address;
        u32                     mmap_size;
        u32                     size;
        atomic_t                cs_cnt;
        u8                      mmap;
        u8                      is_pool;
        u8                      is_internal;
        u8                      is_mmu_mapped;
};


/*
 * QUEUES
 */

struct hl_cs;
struct hl_cs_job;

/* Queue length of external and HW queues */
#define HL_QUEUE_LENGTH                 4096
#define HL_QUEUE_SIZE_IN_BYTES          (HL_QUEUE_LENGTH * HL_BD_SIZE)

#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
#error "HL_QUEUE_LENGTH must be greater than or equal to HL_MAX_JOBS_PER_CS"
#endif

/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH                    HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES             (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)

/* Must be power of 2 */
#define HL_EQ_LENGTH                    64
#define HL_EQ_SIZE_IN_BYTES             (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)

/* Host <-> CPU-CP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE      SZ_2M

/**
 * struct hl_sync_stream_properties -
 *     describes the sync stream properties of a H/W queue
 * @hw_sob: array of the used H/W SOBs by this H/W queue.
 * @next_sob_val: the next value to use for the currently used SOB.
 * @base_sob_id: the base SOB id of the SOBs used by this queue.
 * @base_mon_id: the base MON id of the MONs used by this queue.
 * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
 *                          in order to sync with all slave queues.
 * @collective_slave_mon_id: the MON id used by this slave queue in order to
 *                           sync with its master queue.
 * @collective_sob_id: current SOB id used by this collective slave queue
 *                     to signal its collective master queue upon completion.
 * @curr_sob_offset: the id offset to the currently used SOB from the
 *                   HL_RSVD_SOBS that are being used by this queue.
 */
struct hl_sync_stream_properties {
        struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
        u16             next_sob_val;
        u16             base_sob_id;
        u16             base_mon_id;
        u16             collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
        u16             collective_slave_mon_id;
        u16             collective_sob_id;
        u8              curr_sob_offset;
};

/**
 * struct hl_hw_queue - describes a H/W transport queue.
 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
 * @sync_stream_prop: sync stream queue properties
 * @queue_type: type of queue.
 * @collective_mode: collective mode of current queue
 * @kernel_address: holds the queue's kernel virtual address.
 * @bus_address: holds the queue's DMA address.
 * @pi: holds the queue's pi value.
 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
 * @hw_queue_id: the id of the H/W queue.
 * @cq_id: the id for the corresponding CQ for this H/W queue.
 * @msi_vec: the IRQ number of the H/W queue.
 * @int_queue_len: length of internal queue (number of entries).
 * @valid: is the queue valid (we have an array of 32 queues, not all of them
 *         exist).
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hl_hw_queue {
        struct hl_cs_job                        **shadow_queue;
        struct hl_sync_stream_properties        sync_stream_prop;
        enum hl_queue_type                      queue_type;
        enum hl_collective_mode                 collective_mode;
        void                                    *kernel_address;
        dma_addr_t                              bus_address;
        u32                                     pi;
        atomic_t                                ci;
        u32                                     hw_queue_id;
        u32                                     cq_id;
        u32                                     msi_vec;
        u16                                     int_queue_len;
        u8                                      valid;
        u8                                      supports_sync_stream;
};

/**
 * struct hl_cq - describes a completion queue
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @cq_idx: completion queue index in array
 * @hw_queue_id: the id of the matching H/W queue
 * @ci: ci inside the queue
 * @pi: pi inside the queue
 * @free_slots_cnt: counter of free slots in queue
 */
struct hl_cq {
        struct hl_device        *hdev;
        void                    *kernel_address;
        dma_addr_t              bus_address;
        u32                     cq_idx;
        u32                     hw_queue_id;
        u32                     ci;
        u32                     pi;
        atomic_t                free_slots_cnt;
};

/**
 * struct hl_user_interrupt - holds user interrupt information
 * @hdev: pointer to the device structure
 * @wait_list_head: head to the list of user threads pending on this interrupt
 * @wait_list_lock: protects wait_list_head
 * @interrupt_id: msix interrupt id
 */
struct hl_user_interrupt {
        struct hl_device        *hdev;
        struct list_head        wait_list_head;
        spinlock_t              wait_list_lock;
        u32                     interrupt_id;
};

/**
 * struct hl_user_pending_interrupt - holds a context to a user thread
 *                                    pending on an interrupt
 * @wait_list_node: node in the list of user threads pending on an interrupt
 * @fence: hl fence object for interrupt completion
 */
struct hl_user_pending_interrupt {
        struct list_head        wait_list_node;
        struct hl_fence         fence;
};

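/*
 * Example: because the fence is embedded rather than pointed to,
 * completion code that only holds the hl_fence can recover the
 * enclosing record:
 *
 *	struct hl_user_pending_interrupt *pend =
 *		container_of(fence, struct hl_user_pending_interrupt,
 *				fence);
 */
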
/**
 * struct hl_eq - describes the event queue (single one per device)
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @ci: ci inside the queue
 * @prev_eqe_index: the index of the previous event queue entry. The index of
 *                  the current entry must be +1 of the previous one.
 * @check_eqe_index: do we need to check the index of the current entry vs. the
 *                   previous one. This is for backward compatibility with older
 *                   firmware versions
 */
struct hl_eq {
        struct hl_device        *hdev;
        void                    *kernel_address;
        dma_addr_t              bus_address;
        u32                     ci;
        u32                     prev_eqe_index;
        bool                    check_eqe_index;
};


/*
 * ASICs
 */

/**
 * enum hl_asic_type - supported ASIC types.
 * @ASIC_INVALID: Invalid ASIC type.
 * @ASIC_GOYA: Goya device.
 * @ASIC_GAUDI: Gaudi device.
 * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
 */
enum hl_asic_type {
        ASIC_INVALID,
        ASIC_GOYA,
        ASIC_GAUDI,
        ASIC_GAUDI_SEC
};

struct hl_cs_parser;

/**
 * enum hl_pm_mng_profile - power management profile.
 * @PM_AUTO: internal clock is set by the Linux driver.
 * @PM_MANUAL: internal clock is set by the user.
 * @PM_LAST: last power management type.
 */
enum hl_pm_mng_profile {
        PM_AUTO = 1,
        PM_MANUAL,
        PM_LAST
};

/**
 * enum hl_pll_frequency - PLL frequency.
 * @PLL_HIGH: high frequency.
 * @PLL_LOW: low frequency.
 * @PLL_LAST: last frequency values that were configured by the user.
 */
enum hl_pll_frequency {
        PLL_HIGH = 1,
        PLL_LOW,
        PLL_LAST
};

#define PLL_REF_CLK 50

enum div_select_defs {
        DIV_SEL_REF_CLK = 0,
        DIV_SEL_PLL_CLK = 1,
        DIV_SEL_DIVIDED_REF = 2,
        DIV_SEL_DIVIDED_PLL = 3,
};

enum pci_region {
        PCI_REGION_CFG,
        PCI_REGION_SRAM,
        PCI_REGION_DRAM,
        PCI_REGION_SP_SRAM,
        PCI_REGION_NUMBER,
};

/**
 * struct pci_mem_region - describe memory region in a PCI bar
 * @region_base: region base address
 * @region_size: region size
 * @bar_size: size of the BAR
 * @offset_in_bar: region offset into the bar
 * @bar_id: bar ID of the region
 * @used: 1 if used, 0 otherwise
 */
struct pci_mem_region {
        u64 region_base;
        u64 region_size;
        u64 bar_size;
        u32 offset_in_bar;
        u8 bar_id;
        u8 used;
};

/**
 * struct static_fw_load_mgr - static FW load manager
 * @preboot_version_max_off: max offset to preboot version
 * @boot_fit_version_max_off: max offset to boot fit version
 * @kmd_msg_to_cpu_reg: register address for KMD->CPU messages
 * @cpu_cmd_status_to_host_reg: register address for CPU command status response
 * @cpu_boot_status_reg: boot status register
 * @cpu_boot_dev_status0_reg: boot device status register 0
 * @cpu_boot_dev_status1_reg: boot device status register 1
 * @boot_err0_reg: boot error register 0
 * @boot_err1_reg: boot error register 1
 * @preboot_version_offset_reg: SRAM offset to preboot version register
 * @boot_fit_version_offset_reg: SRAM offset to boot fit version register
 * @sram_offset_mask: mask for getting offset into the SRAM
 * @cpu_reset_wait_msec: used when setting WFE via kmd_msg_to_cpu_reg
 */
struct static_fw_load_mgr {
        u64 preboot_version_max_off;
        u64 boot_fit_version_max_off;
        u32 kmd_msg_to_cpu_reg;
        u32 cpu_cmd_status_to_host_reg;
        u32 cpu_boot_status_reg;
        u32 cpu_boot_dev_status0_reg;
        u32 cpu_boot_dev_status1_reg;
        u32 boot_err0_reg;
        u32 boot_err1_reg;
        u32 preboot_version_offset_reg;
        u32 boot_fit_version_offset_reg;
        u32 sram_offset_mask;
        u32 cpu_reset_wait_msec;
};

/**
 * struct fw_response - FW response to LKD command
 * @ram_offset: descriptor offset into the RAM
 * @ram_type: RAM type containing the descriptor (SRAM/DRAM)
 * @status: command status
 */
struct fw_response {
        u32 ram_offset;
        u8 ram_type;
        u8 status;
};

/**
 * struct dynamic_fw_load_mgr - dynamic FW load manager
 * @response: FW to LKD response
 * @comm_desc: the communication descriptor with FW
 * @image_region: region to copy the FW image to
 * @fw_image_size: size of FW image to load
 * @wait_for_bl_timeout: timeout for waiting for boot loader to respond
 */
struct dynamic_fw_load_mgr {
        struct fw_response response;
        struct lkd_fw_comms_desc comm_desc;
        struct pci_mem_region *image_region;
        size_t fw_image_size;
        u32 wait_for_bl_timeout;
};

/**
 * struct fw_image_props - properties of FW image
 * @image_name: name of the image
 * @src_off: offset in src FW to copy from
 * @copy_size: amount of bytes to copy (0 to copy the whole binary)
 */
struct fw_image_props {
        char *image_name;
        u32 src_off;
        u32 copy_size;
};

/**
 * struct fw_load_mgr - manages the FW loading process
 * @dynamic_loader: specific structure for dynamic load
 * @static_loader: specific structure for static load
 * @boot_fit_img: boot fit image properties
 * @linux_img: linux image properties
 * @cpu_timeout: CPU response timeout in usec
 * @boot_fit_timeout: Boot fit load timeout in usec
 * @skip_bmc: should BMC be skipped
 * @sram_bar_id: SRAM bar ID
 * @dram_bar_id: DRAM bar ID
 * @linux_loaded: true if linux was loaded so far
 */
struct fw_load_mgr {
        union {
                struct dynamic_fw_load_mgr dynamic_loader;
                struct static_fw_load_mgr static_loader;
        };
        struct fw_image_props boot_fit_img;
        struct fw_image_props linux_img;
        u32 cpu_timeout;
        u32 boot_fit_timeout;
        u8 skip_bmc;
        u8 sram_bar_id;
        u8 dram_bar_id;
        u8 linux_loaded;
};

 983/**
 984 * struct hl_asic_funcs - ASIC specific functions that are can be called from
 985 *                        common code.
 986 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
 987 * @early_fini: tears down what was done in early_init.
 988 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
 989 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
 990 * @sw_init: sets up driver state, does not configure H/W.
 991 * @sw_fini: tears down driver state, does not configure H/W.
 992 * @hw_init: sets up the H/W state.
 993 * @hw_fini: tears down the H/W state.
 994 * @halt_engines: halt engines, needed for reset sequence. This also disables
 995 *                interrupts from the device. Should be called before
 996 *                hw_fini and before CS rollback.
 997 * @suspend: handles IP specific H/W or SW changes for suspend.
 998 * @resume: handles IP specific H/W or SW changes for resume.
 999 * @cb_mmap: maps a CB.
1000 * @ring_doorbell: increment PI on a given QMAN.
1001 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
1002 *             function because the PQs are located in different memory areas
1003 *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
1004 *             writing the PQE must match the destination memory area
1005 *             properties.
1006 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
1007 *                           dma_alloc_coherent(). This is ASIC function because
1008 *                           its implementation is not trivial when the driver
1009 *                           is loaded in simulation mode (not upstreamed).
1010 * @asic_dma_free_coherent:  Free coherent DMA memory by calling
1011 *                           dma_free_coherent(). This is ASIC function because
1012 *                           its implementation is not trivial when the driver
1013 *                           is loaded in simulation mode (not upstreamed).
1014 * @scrub_device_mem: Scrub device memory given an address and size
1015 * @get_int_queue_base: get the internal queue base address.
1016 * @test_queues: run simple test on all queues for sanity check.
1017 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
1018 *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
1019 * @asic_dma_pool_free: free small DMA allocation from pool.
1020 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
1021 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
1022 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
1023 * @cs_parser: parse Command Submission.
1024 * @asic_dma_map_sg: DMA map scatter-gather list.
1025 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
1026 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
1027 * @update_eq_ci: update event queue CI.
1028 * @context_switch: called upon ASID context switch.
1029 * @restore_phase_topology: clear all SOBs amd MONs.
1030 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
1031 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
1032 * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
1033 * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
1034 * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
1035 *                    internal memory via DMA engine.
1036 * @add_device_attr: add ASIC specific device attributes.
1037 * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
1038 * @set_pll_profile: change PLL profile (manual/automatic).
1039 * @get_events_stat: retrieve event queue entries histogram.
1040 * @read_pte: read MMU page table entry from DRAM.
1041 * @write_pte: write MMU page table entry to DRAM.
1042 * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
1043 *                        (L1 only) or hard (L0 & L1) flush.
1044 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
1045 *                              ASID-VA-size mask.
1046 * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
1047 * @set_clock_gating: enable/disable clock gating per engine according to
1048 *                    clock gating mask in hdev
1049 * @disable_clock_gating: disable clock gating completely
1050 * @debug_coresight: perform certain actions on Coresight for debugging.
1051 * @is_device_idle: return true if device is idle, false otherwise.
1052 * @soft_reset_late_init: perform certain actions needed after soft reset.
1053 * @hw_queues_lock: acquire H/W queues lock.
1054 * @hw_queues_unlock: release H/W queues lock.
1055 * @get_pci_id: retrieve PCI ID.
1056 * @get_eeprom_data: retrieve EEPROM data from F/W.
1057 * @send_cpu_message: send message to F/W. If the message is timedout, the
1058 *                    driver will eventually reset the device. The timeout can
1059 *                    be determined by the calling function or it can be 0 and
1060 *                    then the timeout is the default timeout for the specific
1061 *                    ASIC
1062 * @get_hw_state: retrieve the H/W state
1063 * @pci_bars_map: Map PCI BARs.
1064 * @init_iatu: Initialize the iATU unit inside the PCI controller.
1065 * @rreg: Read a register. Needed for simulator support.
1066 * @wreg: Write a register. Needed for simulator support.
1067 * @halt_coresight: stop the ETF and ETR traces.
1068 * @ctx_init: context dependent initialization.
1069 * @ctx_fini: context dependent cleanup.
1070 * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
1071 * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
1072 * @load_firmware_to_device: load the firmware to the device's memory
1073 * @load_boot_fit_to_device: load boot fit to device's memory
1074 * @get_signal_cb_size: Get signal CB size.
1075 * @get_wait_cb_size: Get wait CB size.
1076 * @gen_signal_cb: Generate a signal CB.
1077 * @gen_wait_cb: Generate a wait CB.
1078 * @reset_sob: Reset a SOB.
1079 * @reset_sob_group: Reset SOB group
1080 * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
1081 *                        firmware configuration
1082 * @get_device_time: Get the device time.
1083 * @collective_wait_init_cs: Generate collective master/slave packets
1084 *                           and place them in the relevant cs jobs
1085 * @collective_wait_create_jobs: allocate collective wait cs jobs
1086 * @scramble_addr: Routine to scramble the address prior of mapping it
1087 *                 in the MMU.
1088 * @descramble_addr: Routine to de-scramble the address prior of
1089 *                   showing it to users.
1090 * @ack_protection_bits_errors: ack and dump all security violations
1091 * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
1092 *                   also returns the size of the block if caller supplies
1093 *                   a valid pointer for it
1094 * @hw_block_mmap: mmap a HW block with a given id.
1095 * @enable_events_from_fw: send interrupt to firmware to notify them the
1096 *                         driver is ready to receive asynchronous events. This
1097 *                         function should be called during the first init and
1098 *                         after every hard-reset of the device
1099 * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
1100 * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
1101 *                         generic f/w compatible PLL Indexes
1102 * @init_firmware_loader: initialize data for FW loader.
1103 * @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
1104 */
1105struct hl_asic_funcs {
1106        int (*early_init)(struct hl_device *hdev);
1107        int (*early_fini)(struct hl_device *hdev);
1108        int (*late_init)(struct hl_device *hdev);
1109        void (*late_fini)(struct hl_device *hdev);
1110        int (*sw_init)(struct hl_device *hdev);
1111        int (*sw_fini)(struct hl_device *hdev);
1112        int (*hw_init)(struct hl_device *hdev);
1113        void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
1114        void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
1115        int (*suspend)(struct hl_device *hdev);
1116        int (*resume)(struct hl_device *hdev);
1117        int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1118                        void *cpu_addr, dma_addr_t dma_addr, size_t size);
1119        void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
1120        void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
1121                        struct hl_bd *bd);
1122        void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
1123                                        dma_addr_t *dma_handle, gfp_t flag);
1124        void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
1125                                        void *cpu_addr, dma_addr_t dma_handle);
1126        int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
1127        void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
1128                                dma_addr_t *dma_handle, u16 *queue_len);
1129        int (*test_queues)(struct hl_device *hdev);
1130        void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
1131                                gfp_t mem_flags, dma_addr_t *dma_handle);
1132        void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
1133                                dma_addr_t dma_addr);
1134        void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
1135                                size_t size, dma_addr_t *dma_handle);
1136        void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
1137                                size_t size, void *vaddr);
1138        void (*hl_dma_unmap_sg)(struct hl_device *hdev,
1139                                struct scatterlist *sgl, int nents,
1140                                enum dma_data_direction dir);
1141        int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
1142        int (*asic_dma_map_sg)(struct hl_device *hdev,
1143                                struct scatterlist *sgl, int nents,
1144                                enum dma_data_direction dir);
1145        u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
1146                                        struct sg_table *sgt);
1147        void (*add_end_of_cb_packets)(struct hl_device *hdev,
1148                                        void *kernel_address, u32 len,
1149                                        u64 cq_addr, u32 cq_val, u32 msix_num,
1150                                        bool eb);
1151        void (*update_eq_ci)(struct hl_device *hdev, u32 val);
1152        int (*context_switch)(struct hl_device *hdev, u32 asid);
1153        void (*restore_phase_topology)(struct hl_device *hdev);
1154        int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
1155                                bool user_address, u32 *val);
1156        int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
1157                                bool user_address, u32 val);
1158        int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
1159                                bool user_address, u64 *val);
1160        int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
1161                                bool user_address, u64 val);
1162        int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
1163                                void *blob_addr);
1164        void (*add_device_attr)(struct hl_device *hdev,
1165                                struct attribute_group *dev_attr_grp);
1166        void (*handle_eqe)(struct hl_device *hdev,
1167                                struct hl_eq_entry *eq_entry);
1168        void (*set_pll_profile)(struct hl_device *hdev,
1169                        enum hl_pll_frequency freq);
1170        void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
1171                                u32 *size);
1172        u64 (*read_pte)(struct hl_device *hdev, u64 addr);
1173        void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
1174        int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
1175                                        u32 flags);
1176        int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
1177                                u32 flags, u32 asid, u64 va, u64 size);
1178        int (*send_heartbeat)(struct hl_device *hdev);
1179        void (*set_clock_gating)(struct hl_device *hdev);
1180        void (*disable_clock_gating)(struct hl_device *hdev);
1181        int (*debug_coresight)(struct hl_device *hdev, void *data);
1182        bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
1183                                        u8 mask_len, struct seq_file *s);
1184        int (*soft_reset_late_init)(struct hl_device *hdev);
1185        void (*hw_queues_lock)(struct hl_device *hdev);
1186        void (*hw_queues_unlock)(struct hl_device *hdev);
1187        u32 (*get_pci_id)(struct hl_device *hdev);
1188        int (*get_eeprom_data)(struct hl_device *hdev, void *data,
1189                                size_t max_size);
1190        int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
1191                                u16 len, u32 timeout, u64 *result);
1192        int (*pci_bars_map)(struct hl_device *hdev);
1193        int (*init_iatu)(struct hl_device *hdev);
1194        u32 (*rreg)(struct hl_device *hdev, u32 reg);
1195        void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
1196        void (*halt_coresight)(struct hl_device *hdev);
1197        int (*ctx_init)(struct hl_ctx *ctx);
1198        void (*ctx_fini)(struct hl_ctx *ctx);
1199        int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
1200        u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
1201        int (*load_firmware_to_device)(struct hl_device *hdev);
1202        int (*load_boot_fit_to_device)(struct hl_device *hdev);
1203        u32 (*get_signal_cb_size)(struct hl_device *hdev);
1204        u32 (*get_wait_cb_size)(struct hl_device *hdev);
1205        u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
1206                        u32 size, bool eb);
1207        u32 (*gen_wait_cb)(struct hl_device *hdev,
1208                        struct hl_gen_wait_properties *prop);
1209        void (*reset_sob)(struct hl_device *hdev, void *data);
1210        void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
1211        void (*set_dma_mask_from_fw)(struct hl_device *hdev);
1212        u64 (*get_device_time)(struct hl_device *hdev);
1213        void (*collective_wait_init_cs)(struct hl_cs *cs);
1214        int (*collective_wait_create_jobs)(struct hl_device *hdev,
1215                        struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
1216                        u32 collective_engine_id);
1217        u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
1218        u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
1219        void (*ack_protection_bits_errors)(struct hl_device *hdev);
1220        int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
1221                                u32 *block_size, u32 *block_id);
1222        int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1223                        u32 block_id, u32 block_size);
1224        void (*enable_events_from_fw)(struct hl_device *hdev);
1225        void (*get_msi_info)(__le32 *table);
1226        int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
1227        void (*init_firmware_loader)(struct hl_device *hdev);
1228        void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
1229};
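
/*
 * All ASIC-specific behavior is reached through this ops table; common code
 * never calls a GOYA/GAUDI function directly. A minimal sketch of a call
 * site (illustrative only, not a specific location in this driver):
 *
 *      hdev->asic_funcs->hw_queues_lock(hdev);
 *      ...queue manipulation...
 *      hdev->asic_funcs->hw_queues_unlock(hdev);
 */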
1230
1231
1232/*
1233 * CONTEXTS
1234 */
1235
1236#define HL_KERNEL_ASID_ID       0
1237
1238/**
1239 * enum hl_va_range_type - virtual address range type.
1240 * @HL_VA_RANGE_TYPE_HOST: range type of host pages
1241 * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages
1242 * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages
1243 */
1244enum hl_va_range_type {
1245        HL_VA_RANGE_TYPE_HOST,
1246        HL_VA_RANGE_TYPE_HOST_HUGE,
1247        HL_VA_RANGE_TYPE_DRAM,
1248        HL_VA_RANGE_TYPE_MAX
1249};
1250
1251/**
1252 * struct hl_va_range - virtual addresses range.
1253 * @lock: protects the virtual addresses list.
1254 * @list: list of virtual addresses blocks available for mappings.
1255 * @start_addr: range start address.
1256 * @end_addr: range end address.
1257 * @page_size: page size of this va range.
1258 */
1259struct hl_va_range {
1260        struct mutex            lock;
1261        struct list_head        list;
1262        u64                     start_addr;
1263        u64                     end_addr;
1264        u32                     page_size;
1265};
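
/*
 * Allocation from a VA range walks the free-block list under the range's
 * lock. A conceptual sketch, assuming a simple first-fit policy and a
 * caller-supplied needed_size (not the driver's exact allocation code):
 *
 *      struct hl_vm_va_block *va_block;
 *
 *      mutex_lock(&va_range->lock);
 *      list_for_each_entry(va_block, &va_range->list, node) {
 *              if (va_block->size >= needed_size)
 *                      break;
 *      }
 *      mutex_unlock(&va_range->lock);
 */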
1266
1267/**
1268 * struct hl_cs_counters_atomic - command submission counters
1269 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
1270 * @parsing_drop_cnt: dropped due to error in packet parsing
1271 * @queue_full_drop_cnt: dropped due to queue full
1272 * @device_in_reset_drop_cnt: dropped due to device in reset
1273 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
1274 * @validation_drop_cnt: dropped due to error in validation
1275 */
1276struct hl_cs_counters_atomic {
1277        atomic64_t out_of_mem_drop_cnt;
1278        atomic64_t parsing_drop_cnt;
1279        atomic64_t queue_full_drop_cnt;
1280        atomic64_t device_in_reset_drop_cnt;
1281        atomic64_t max_cs_in_flight_drop_cnt;
1282        atomic64_t validation_drop_cnt;
1283};
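
/*
 * The counters use the atomic64 API so they can be bumped from any context
 * without extra locking. An illustrative sketch (not a specific call site
 * in this driver):
 *
 *      atomic64_inc(&ctx->cs_counters.queue_full_drop_cnt);
 *
 * Readers sample the counters with atomic64_read() when reporting
 * statistics.
 */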
1284
1285/**
1286 * struct hl_pending_cb - pending command buffer structure
1287 * @cb_node: cb node in pending cb list
1288 * @cb: command buffer to send in next submission
1289 * @cb_size: command buffer size
1290 * @hw_queue_id: destination queue id
1291 */
1292struct hl_pending_cb {
1293        struct list_head        cb_node;
1294        struct hl_cb            *cb;
1295        u32                     cb_size;
1296        u32                     hw_queue_id;
1297};
1298
1299/**
1300 * struct hl_ctx - user/kernel context.
1301 * @mem_hash: holds mapping from virtual address to virtual memory area
1302 *              descriptor (hl_vm_phys_pg_pack or hl_userptr).
1303 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
1304 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
1305 * @hdev: pointer to the device structure.
1306 * @refcount: reference counter for the context. Context is released only when
1307 *              this hits 0. It is incremented on CS and CS_WAIT.
1308 * @cs_pending: array of hl fence objects representing pending CS.
1309 * @va_range: holds available virtual addresses for host and dram mappings.
1310 * @mem_hash_lock: protects the mem_hash.
1311 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
1312 *            MMU hash or walking the PGT requires taking this lock.
1313 * @hw_block_list_lock: protects the HW block memory list.
1314 * @debugfs_list: node in debugfs list of contexts.
1315 * @pending_cb_list: list of pending command buffers waiting to be sent upon
1316 *                  the next user command submission.
1317 * @hw_block_mem_list: list of HW block virtual mapped addresses.
1318 * @cs_counters: context command submission counters.
1319 * @cb_va_pool: device VA pool for command buffers which are mapped to the
1320 *              device's MMU.
1321 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
1322 *                      to the user so the user can inquire about the CS. It
1323 *                      is used as an index to the cs_pending array.
1324 * @dram_default_hops: array that holds all hops addresses needed for default
1325 *                     DRAM mapping.
1326 * @pending_cb_lock: spinlock to protect pending cb list
1327 * @cs_lock: spinlock to protect cs_sequence.
1328 * @dram_phys_mem: amount of used physical DRAM memory by this context.
1329 * @thread_ctx_switch_token: token to prevent multiple threads of the same
1330 *                              context from running the context switch phase.
1331 *                              Only a single thread should run it.
1332 * @thread_pending_cb_token: token to prevent multiple threads from processing
1333 *                              the pending CB list. Only a single thread should
1334 *                              process the list since it is protected by a
1335 *                              spinlock and we don't want to halt the entire
1336 *                              command submission sequence.
1337 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
1338 *                              the context switch phase from moving to their
1339 *                              execution phase before the context switch phase
1340 *                              has finished.
1341 * @asid: context's unique address space ID in the device's MMU.
1342 * @handle: context's opaque handle for user
1343 */
1344struct hl_ctx {
1345        DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
1346        DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
1347        struct hl_fpriv                 *hpriv;
1348        struct hl_device                *hdev;
1349        struct kref                     refcount;
1350        struct hl_fence                 **cs_pending;
1351        struct hl_va_range              *va_range[HL_VA_RANGE_TYPE_MAX];
1352        struct mutex                    mem_hash_lock;
1353        struct mutex                    mmu_lock;
1354        struct mutex                    hw_block_list_lock;
1355        struct list_head                debugfs_list;
1356        struct list_head                pending_cb_list;
1357        struct list_head                hw_block_mem_list;
1358        struct hl_cs_counters_atomic    cs_counters;
1359        struct gen_pool                 *cb_va_pool;
1360        u64                             cs_sequence;
1361        u64                             *dram_default_hops;
1362        spinlock_t                      pending_cb_lock;
1363        spinlock_t                      cs_lock;
1364        atomic64_t                      dram_phys_mem;
1365        atomic_t                        thread_ctx_switch_token;
1366        atomic_t                        thread_pending_cb_token;
1367        u32                             thread_ctx_switch_wait_token;
1368        u32                             asid;
1369        u32                             handle;
1370};
1371
1372/**
1373 * struct hl_ctx_mgr - for handling multiple contexts.
1374 * @ctx_lock: protects ctx_handles.
1375 * @ctx_handles: idr to hold all ctx handles.
1376 */
1377struct hl_ctx_mgr {
1378        struct mutex            ctx_lock;
1379        struct idr              ctx_handles;
1380};
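
/*
 * Handles are managed with the common idr pattern. A minimal sketch of
 * allocating a context handle under the manager's lock (assumed flow,
 * not the driver's exact code):
 *
 *      mutex_lock(&mgr->ctx_lock);
 *      rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 *      mutex_unlock(&mgr->ctx_lock);
 *      if (rc >= 0)
 *              ctx->handle = rc;
 */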
1381
1382
1383
1384/*
1385 * COMMAND SUBMISSIONS
1386 */
1387
1388/**
1389 * struct hl_userptr - memory mapping chunk information
1390 * @vm_type: type of the VM.
1391 * @job_node: linked-list node for hanging the object on the Job's list.
1392 * @pages: pointer to struct page array
1393 * @npages: size of @pages array
1394 * @sgt: pointer to the scatter-gather table that holds the pages.
1395 * @dir: for DMA unmapping, the direction must be supplied, so save it.
1396 * @debugfs_list: node in debugfs list of userptrs.
1397 * @addr: user-space virtual address of the start of the memory area.
1398 * @size: size of the memory area to pin & map.
1399 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
1400 */
1401struct hl_userptr {
1402        enum vm_type_t          vm_type; /* must be first */
1403        struct list_head        job_node;
1404        struct page             **pages;
1405        unsigned int            npages;
1406        struct sg_table         *sgt;
1407        enum dma_data_direction dir;
1408        struct list_head        debugfs_list;
1409        u64                     addr;
1410        u32                     size;
1411        u8                      dma_mapped;
1412};
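
/*
 * The generic kernel flow behind such an object (a conceptual sketch, not
 * this driver's exact implementation; addr, npages, offset and size stand
 * for the fields above) pins the user pages and wraps them in a
 * scatter-gather table before DMA-mapping them:
 *
 *      rc = pin_user_pages_fast(addr, npages, FOLL_WRITE, pages);
 *      if (rc == npages)
 *              rc = sg_alloc_table_from_pages(sgt, pages, npages, offset,
 *                                              size, GFP_KERNEL);
 */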
1413
1414/**
1415 * struct hl_cs - command submission.
1416 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
1417 * @ctx: the context this CS belongs to.
1418 * @job_list: list of the CS's jobs in the various queues.
1419 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
1420 * @refcount: reference counter for usage of the CS.
1421 * @fence: pointer to the fence object of this CS.
1422 * @signal_fence: pointer to the fence object of the signal CS (used by wait
1423 *                CS only).
1424 * @finish_work: workqueue object to run when CS is completed by H/W.
1425 * @work_tdr: delayed work node for TDR.
1426 * @mirror_node: node in device mirror list of command submissions.
1427 * @staged_cs_node: node in the staged cs list.
1428 * @debugfs_list: node in debugfs list of command submissions.
1429 * @sequence: the sequence number of this CS.
1430 * @staged_sequence: the sequence of the staged submission this CS is part of,
1431 *                   relevant only if staged_cs is set.
1432 * @timeout_jiffies: cs timeout in jiffies.
1433 * @submission_time_jiffies: submission time of the CS.
1434 * @type: CS_TYPE_*.
1435 * @submitted: true if CS was submitted to H/W.
1436 * @completed: true if CS was completed by device.
1437 * @timedout: true if the CS has timed out.
1438 * @tdr_active: true if TDR was activated for this CS (to prevent
1439 *              double TDR activation).
1440 * @aborted: true if CS was aborted due to some device error.
1441 * @timestamp: true if a timestamp must be captured upon completion.
1442 * @staged_last: true if this is the last staged CS and needs completion.
1443 * @staged_first: true if this is the first staged CS and we need to receive
1444 *                timeout for this CS.
1445 * @staged_cs: true if this CS is part of a staged submission.
1446 * @skip_reset_on_timeout: true if we shall not reset the device in case
1447 *                         timeout occurs (debug scenario).
1448 */
1449struct hl_cs {
1450        u16                     *jobs_in_queue_cnt;
1451        struct hl_ctx           *ctx;
1452        struct list_head        job_list;
1453        spinlock_t              job_lock;
1454        struct kref             refcount;
1455        struct hl_fence         *fence;
1456        struct hl_fence         *signal_fence;
1457        struct work_struct      finish_work;
1458        struct delayed_work     work_tdr;
1459        struct list_head        mirror_node;
1460        struct list_head        staged_cs_node;
1461        struct list_head        debugfs_list;
1462        u64                     sequence;
1463        u64                     staged_sequence;
1464        u64                     timeout_jiffies;
1465        u64                     submission_time_jiffies;
1466        enum hl_cs_type         type;
1467        u8                      submitted;
1468        u8                      completed;
1469        u8                      timedout;
1470        u8                      tdr_active;
1471        u8                      aborted;
1472        u8                      timestamp;
1473        u8                      staged_last;
1474        u8                      staged_first;
1475        u8                      staged_cs;
1476        u8                      skip_reset_on_timeout;
1477};
1478
1479/**
1480 * struct hl_cs_job - command submission job.
1481 * @cs_node: the node to hang on the CS jobs list.
1482 * @cs: the CS this job belongs to.
1483 * @user_cb: the CB we got from the user.
1484 * @patched_cb: in case of patching, this is internal CB which is submitted on
1485 *              the queue instead of the CB we got from the IOCTL.
1486 * @finish_work: workqueue object to run when job is completed.
1487 * @userptr_list: linked-list of userptr mappings that belong to this job and
1488 *                      wait for completion.
1489 * @debugfs_list: node in debugfs list of command submission jobs.
1490 * @refcount: reference counter for usage of the CS job.
1491 * @queue_type: the type of the H/W queue this job is submitted to.
1492 * @id: the id of this job inside a CS.
1493 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1494 * @user_cb_size: the actual size of the CB we got from the user.
1495 * @job_cb_size: the actual size of the CB that we put on the queue.
1496 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1497 *                          handle to a kernel-allocated CB object, false
1498 *                          otherwise (SRAM/DRAM/host address).
1499 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1500 *                    info is needed later, when adding the 2xMSG_PROT at the
1501 *                    end of the JOB, to know which barriers to put in the
1502 *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1503 *                    have streams so the engine can't be busy by another
1504 *                    stream.
1505 */
1506struct hl_cs_job {
1507        struct list_head        cs_node;
1508        struct hl_cs            *cs;
1509        struct hl_cb            *user_cb;
1510        struct hl_cb            *patched_cb;
1511        struct work_struct      finish_work;
1512        struct list_head        userptr_list;
1513        struct list_head        debugfs_list;
1514        struct kref             refcount;
1515        enum hl_queue_type      queue_type;
1516        u32                     id;
1517        u32                     hw_queue_id;
1518        u32                     user_cb_size;
1519        u32                     job_cb_size;
1520        u8                      is_kernel_allocated_cb;
1521        u8                      contains_dma_pkt;
1522};
1523
1524/**
1525 * struct hl_cs_parser - command submission parser properties.
1526 * @user_cb: the CB we got from the user.
1527 * @patched_cb: in case of patching, this is internal CB which is submitted on
1528 *              the queue instead of the CB we got from the IOCTL.
1529 * @job_userptr_list: linked-list of userptr mappings that belong to the related
1530 *                      job and wait for completion.
1531 * @cs_sequence: the sequence number of the related CS.
1532 * @queue_type: the type of the H/W queue this job is submitted to.
1533 * @ctx_id: the ID of the context the related CS belongs to.
1534 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1535 * @user_cb_size: the actual size of the CB we got from the user.
1536 * @patched_cb_size: the size of the CB after parsing.
1537 * @job_id: the id of the related job inside the related CS.
1538 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1539 *                          handle to a kernel-allocated CB object, false
1540 *                          otherwise (SRAM/DRAM/host address).
1541 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1542 *                    info is needed later, when adding the 2xMSG_PROT at the
1543 *                    end of the JOB, to know which barriers to put in the
1544 *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1545 *                    have streams so the engine can't be busy by another
1546 *                    stream.
1547 * @completion: true if we need completion for this CS.
1548 */
1549struct hl_cs_parser {
1550        struct hl_cb            *user_cb;
1551        struct hl_cb            *patched_cb;
1552        struct list_head        *job_userptr_list;
1553        u64                     cs_sequence;
1554        enum hl_queue_type      queue_type;
1555        u32                     ctx_id;
1556        u32                     hw_queue_id;
1557        u32                     user_cb_size;
1558        u32                     patched_cb_size;
1559        u8                      job_id;
1560        u8                      is_kernel_allocated_cb;
1561        u8                      contains_dma_pkt;
1562        u8                      completion;
1563};
1564
1565/*
1566 * MEMORY STRUCTURE
1567 */
1568
1569/**
1570 * struct hl_vm_hash_node - hash element from virtual address to virtual
1571 *                              memory area descriptor (hl_vm_phys_pg_pack or
1572 *                              hl_userptr).
1573 * @node: node to hang on the hash table in context object.
1574 * @vaddr: key virtual address.
1575 * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
1576 */
1577struct hl_vm_hash_node {
1578        struct hlist_node       node;
1579        u64                     vaddr;
1580        void                    *ptr;
1581};
1582
1583/**
1584 * struct hl_vm_hw_block_list_node - list element from user virtual address to
1585 *                              HW block id.
1586 * @node: node to hang on the list in context object.
1587 * @ctx: the context this node belongs to.
1588 * @vaddr: virtual address of the HW block.
1589 * @size: size of the block.
1590 * @id: HW block id (handle).
1591 */
1592struct hl_vm_hw_block_list_node {
1593        struct list_head        node;
1594        struct hl_ctx           *ctx;
1595        unsigned long           vaddr;
1596        u32                     size;
1597        u32                     id;
1598};
1599
1600/**
1601 * struct hl_vm_phys_pg_pack - physical page pack.
1602 * @vm_type: describes the type of the virtual area descriptor.
1603 * @pages: the physical page array.
1604 * @npages: num physical pages in the pack.
1605 * @total_size: total size of all the pages in this list.
1606 * @mapping_cnt: number of shared mappings.
1607 * @asid: the context related to this list.
1608 * @page_size: size of each page in the pack.
1609 * @flags: HL_MEM_* flags related to this list.
1610 * @handle: the provided handle related to this list.
1611 * @offset: offset from the first page.
1612 * @contiguous: is contiguous physical memory.
1613 * @created_from_userptr: is product of host virtual address.
1614 */
1615struct hl_vm_phys_pg_pack {
1616        enum vm_type_t          vm_type; /* must be first */
1617        u64                     *pages;
1618        u64                     npages;
1619        u64                     total_size;
1620        atomic_t                mapping_cnt;
1621        u32                     asid;
1622        u32                     page_size;
1623        u32                     flags;
1624        u32                     handle;
1625        u32                     offset;
1626        u8                      contiguous;
1627        u8                      created_from_userptr;
1628};
1629
1630/**
1631 * struct hl_vm_va_block - virtual range block information.
1632 * @node: node to hang on the virtual range list in context object.
1633 * @start: virtual range start address.
1634 * @end: virtual range end address.
1635 * @size: virtual range size.
1636 */
1637struct hl_vm_va_block {
1638        struct list_head        node;
1639        u64                     start;
1640        u64                     end;
1641        u64                     size;
1642};
1643
1644/**
1645 * struct hl_vm - virtual memory manager for MMU.
1646 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
1647 * @dram_pg_pool_refcount: reference counter for the pool usage.
1648 * @idr_lock: protects phys_pg_pack_handles.
1649 * @phys_pg_pack_handles: idr to hold all device allocation handles.
1650 * @init_done: whether initialization was done. We need this because VM
1651 *              initialization might be skipped during device initialization.
1652 */
1653struct hl_vm {
1654        struct gen_pool         *dram_pg_pool;
1655        struct kref             dram_pg_pool_refcount;
1656        spinlock_t              idr_lock;
1657        struct idr              phys_pg_pack_handles;
1658        u8                      init_done;
1659};
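
/*
 * DRAM physical pages are handed out by the gen_pool allocator. A minimal
 * sketch of taking one device page from the pool (illustrative only):
 *
 *      u64 paddr = gen_pool_alloc(vm->dram_pg_pool, page_size);
 *
 *      if (!paddr)
 *              return -ENOMEM;
 */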
1660
1661
1662/*
1663 * DEBUG, PROFILING STRUCTURE
1664 */
1665
1666/**
1667 * struct hl_debug_params - Coresight debug parameters.
1668 * @input: pointer to component specific input parameters.
1669 * @output: pointer to component specific output parameters.
1670 * @output_size: size of output buffer.
1671 * @reg_idx: relevant register ID.
1672 * @op: component operation to execute.
1673 * @enable: true to enable component debugging, false otherwise.
1674 */
1675struct hl_debug_params {
1676        void *input;
1677        void *output;
1678        u32 output_size;
1679        u32 reg_idx;
1680        u32 op;
1681        bool enable;
1682};
1683
1684/*
1685 * FILE PRIVATE STRUCTURE
1686 */
1687
1688/**
1689 * struct hl_fpriv - process information stored in FD private data.
1690 * @hdev: habanalabs device structure.
1691 * @filp: pointer to the given file structure.
1692 * @taskpid: current process ID.
1693 * @ctx: current executing context. TODO: remove for multiple ctx per process
1694 * @ctx_mgr: context manager to handle multiple context for this FD.
1695 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
1696 * @debugfs_list: list of relevant ASIC debugfs.
1697 * @dev_node: node in the device list of file private data
1698 * @refcount: number of related contexts.
1699 * @restore_phase_mutex: lock for context switch and restore phase.
1700 * @is_control: true for control device, false otherwise
1701 */
1702struct hl_fpriv {
1703        struct hl_device        *hdev;
1704        struct file             *filp;
1705        struct pid              *taskpid;
1706        struct hl_ctx           *ctx;
1707        struct hl_ctx_mgr       ctx_mgr;
1708        struct hl_cb_mgr        cb_mgr;
1709        struct list_head        debugfs_list;
1710        struct list_head        dev_node;
1711        struct kref             refcount;
1712        struct mutex            restore_phase_mutex;
1713        u8                      is_control;
1714};
1715
1716
1717/*
1718 * DebugFS
1719 */
1720
1721/**
1722 * struct hl_info_list - debugfs file ops.
1723 * @name: file name.
1724 * @show: function to output information.
1725 * @write: function to write to the file.
1726 */
1727struct hl_info_list {
1728        const char      *name;
1729        int             (*show)(struct seq_file *s, void *data);
1730        ssize_t         (*write)(struct file *file, const char __user *buf,
1731                                size_t count, loff_t *f_pos);
1732};
1733
1734/**
1735 * struct hl_debugfs_entry - debugfs dentry wrapper.
1736 * @info_ent: dentry related ops.
1737 * @dev_entry: ASIC specific debugfs manager.
1738 */
1739struct hl_debugfs_entry {
1740        const struct hl_info_list       *info_ent;
1741        struct hl_dbg_device_entry      *dev_entry;
1742};
1743
1744/**
1745 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
1746 * @root: root dentry.
1747 * @hdev: habanalabs device structure.
1748 * @entry_arr: array of available hl_debugfs_entry.
1749 * @file_list: list of available debugfs files.
1750 * @file_mutex: protects file_list.
1751 * @cb_list: list of available CBs.
1752 * @cb_spinlock: protects cb_list.
1753 * @cs_list: list of available CSs.
1754 * @cs_spinlock: protects cs_list.
1755 * @cs_job_list: list of available CS jobs.
1756 * @cs_job_spinlock: protects cs_job_list.
1757 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
1758 * @userptr_spinlock: protects userptr_list.
1759 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
1760 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
1761 * @blob_desc: descriptor of blob
1762 * @addr: next address to read/write from/to in read/write32.
1763 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
1764 * @mmu_asid: ASID to use while translating in mmu_show.
1765 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
1766 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
1767 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
1768 */
1769struct hl_dbg_device_entry {
1770        struct dentry                   *root;
1771        struct hl_device                *hdev;
1772        struct hl_debugfs_entry         *entry_arr;
1773        struct list_head                file_list;
1774        struct mutex                    file_mutex;
1775        struct list_head                cb_list;
1776        spinlock_t                      cb_spinlock;
1777        struct list_head                cs_list;
1778        spinlock_t                      cs_spinlock;
1779        struct list_head                cs_job_list;
1780        spinlock_t                      cs_job_spinlock;
1781        struct list_head                userptr_list;
1782        spinlock_t                      userptr_spinlock;
1783        struct list_head                ctx_mem_hash_list;
1784        spinlock_t                      ctx_mem_hash_spinlock;
1785        struct debugfs_blob_wrapper     blob_desc;
1786        u64                             addr;
1787        u64                             mmu_addr;
1788        u32                             mmu_asid;
1789        u8                              i2c_bus;
1790        u8                              i2c_addr;
1791        u8                              i2c_reg;
1792};
1793
1794
1795/*
1796 * DEVICES
1797 */
1798
1799#define HL_STR_MAX      32
1800
1801#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
1802
1803/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
1804 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
1805 */
1806#define HL_MAX_MINORS   256
1807
1808/*
1809 * Registers read & write functions.
1810 */
1811
1812u32 hl_rreg(struct hl_device *hdev, u32 reg);
1813void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1814
1815#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
1816#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
1817#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",    \
1818                        hdev->asic_funcs->rreg(hdev, (reg)))
1819
1820#define WREG32_P(reg, val, mask)                                \
1821        do {                                                    \
1822                u32 tmp_ = RREG32(reg);                         \
1823                tmp_ &= (mask);                                 \
1824                tmp_ |= ((val) & ~(mask));                      \
1825                WREG32(reg, tmp_);                              \
1826        } while (0)
1827#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1828#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
1829
1830#define RMWREG32(reg, val, mask)                                \
1831        do {                                                    \
1832                u32 tmp_ = RREG32(reg);                         \
1833                tmp_ &= ~(mask);                                \
1834                tmp_ |= ((val) << __ffs(mask));                 \
1835                WREG32(reg, tmp_);                              \
1836        } while (0)
1837
1838#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
1839
1840#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
1841#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
1842#define WREG32_FIELD(reg, offset, field, val)   \
1843        WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
1844                                ~REG_FIELD_MASK(reg, field)) | \
1845                                (val) << REG_FIELD_SHIFT(reg, field))
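
/*
 * Worked example for the read-modify-write helpers above: with
 * mask == 0x0000FF00, __ffs(mask) is 8, so RMWREG32(reg, 0x5A, 0x0000FF00)
 * clears bits 15:8 and writes 0x5A into them, leaving the other register
 * bits untouched. Note the inverse convention of WREG32_P(reg, val, mask):
 * there, the bits set in mask are preserved and val lands in ~mask.
 */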
1846
1847/* Timeouts should be longer when working with the simulator, but cap the
1848 * increased timeout at some maximum.
1849 */
1850#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1851({ \
1852        ktime_t __timeout; \
1853        if (hdev->pdev) \
1854                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1855        else \
1856                __timeout = ktime_add_us(ktime_get(),\
1857                                min((u64)(timeout_us * 10), \
1858                                        (u64) HL_SIM_MAX_TIMEOUT_US)); \
1859        might_sleep_if(sleep_us); \
1860        for (;;) { \
1861                (val) = RREG32(addr); \
1862                if (cond) \
1863                        break; \
1864                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1865                        (val) = RREG32(addr); \
1866                        break; \
1867                } \
1868                if (sleep_us) \
1869                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1870        } \
1871        (cond) ? 0 : -ETIMEDOUT; \
1872})
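
/*
 * Typical use of the polling helper above (a sketch; status_reg and
 * target_val are placeholders): wait up to one second for a register to
 * reach a value, sleeping ~100us between reads. rc is 0 on success or
 * -ETIMEDOUT on failure.
 *
 *      u32 status;
 *      int rc;
 *
 *      rc = hl_poll_timeout(hdev, status_reg, status,
 *                              (status == target_val), 100, 1000000);
 */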
1873
1874/*
1875 * The address in this macro always points to a memory location in the
1876 * host's (server's) memory. That location is updated asynchronously
1877 * either by the direct access of the device or by another core.
1878 *
1879 * To work both in LE and BE architectures, we need to distinguish between the
1880 * two states (device or another core updates the memory location). Therefore,
1881 * if mem_written_by_device is true, the host memory being polled will be
1882 * updated directly by the device. If false, the host memory being polled will
1883 * be updated by host CPU. Required so host knows whether or not the memory
1884 * might need to be byte-swapped before returning value to caller.
1885 */
1886#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
1887                                mem_written_by_device) \
1888({ \
1889        ktime_t __timeout; \
1890        if (hdev->pdev) \
1891                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1892        else \
1893                __timeout = ktime_add_us(ktime_get(),\
1894                                min((u64)(timeout_us * 10), \
1895                                        (u64) HL_SIM_MAX_TIMEOUT_US)); \
1896        might_sleep_if(sleep_us); \
1897        for (;;) { \
1898                /* Verify we read updates done by other cores or by device */ \
1899                mb(); \
1900                (val) = *((u32 *)(addr)); \
1901                if (mem_written_by_device) \
1902                        (val) = le32_to_cpu(*(__le32 *) &(val)); \
1903                if (cond) \
1904                        break; \
1905                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1906                        (val) = *((u32 *)(addr)); \
1907                        if (mem_written_by_device) \
1908                                (val) = le32_to_cpu(*(__le32 *) &(val)); \
1909                        break; \
1910                } \
1911                if (sleep_us) \
1912                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1913        } \
1914        (cond) ? 0 : -ETIMEDOUT; \
1915})
1916
1917#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
1918                                        timeout_us) \
1919({ \
1920        ktime_t __timeout; \
1921        if (hdev->pdev) \
1922                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1923        else \
1924                __timeout = ktime_add_us(ktime_get(),\
1925                                min((u64)(timeout_us * 10), \
1926                                        (u64) HL_SIM_MAX_TIMEOUT_US)); \
1927        might_sleep_if(sleep_us); \
1928        for (;;) { \
1929                (val) = readl(addr); \
1930                if (cond) \
1931                        break; \
1932                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1933                        (val) = readl(addr); \
1934                        break; \
1935                } \
1936                if (sleep_us) \
1937                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1938        } \
1939        (cond) ? 0 : -ETIMEDOUT; \
1940})
1941
1942struct hwmon_chip_info;
1943
1944/**
1945 * struct hl_device_reset_work - reset workqueue task wrapper.
1946 * @wq: work queue for device reset procedure.
1947 * @reset_work: reset work to be done.
1948 * @hdev: habanalabs device structure.
1949 */
1950struct hl_device_reset_work {
1951        struct workqueue_struct         *wq;
1952        struct delayed_work             reset_work;
1953        struct hl_device                *hdev;
1954};
1955
1956/**
1957 * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
1958 * information.
1959 * @virt_addr: the virtual address of the hop.
1960 * @phys_addr: the physical address of the hop (used by the device-mmu).
1961 * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
1962 */
1963struct hr_mmu_hop_addrs {
1964        u64 virt_addr;
1965        u64 phys_addr;
1966        u64 shadow_addr;
1967};
1968
1969/**
1970 * struct hl_mmu_hr_priv - used for holding per-device mmu host-resident
1971 * page-table internal information.
1972 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
1973 * @mmu_shadow_hop0: shadow array of hop0 tables.
1974 */
1975struct hl_mmu_hr_priv {
1976        struct gen_pool *mmu_pgt_pool;
1977        struct hr_mmu_hop_addrs *mmu_shadow_hop0;
1978};
1979
1980/**
1981 * struct hl_mmu_dr_priv - used for holding per-device mmu device-resident
1982 * page-table internal information.
1983 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
1984 * @mmu_shadow_hop0: shadow array of hop0 tables.
1985 */
1986struct hl_mmu_dr_priv {
1987        struct gen_pool *mmu_pgt_pool;
1988        void *mmu_shadow_hop0;
1989};
1990
1991/**
1992 * struct hl_mmu_priv - used for holding per-device mmu internal information.
1993 * @dr: information on the device-resident MMU, when it exists.
1994 * @hr: information on the host-resident MMU, when it exists.
1995 */
1996struct hl_mmu_priv {
1997        struct hl_mmu_dr_priv dr;
1998        struct hl_mmu_hr_priv hr;
1999};
2000
2001/**
2002 * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry
2003 *                that was created in order to translate a virtual address to a
2004 *                physical one.
2005 * @hop_addr: The address of the hop.
2006 * @hop_pte_addr: The address of the hop entry.
2007 * @hop_pte_val: The value in the hop entry.
2008 */
2009struct hl_mmu_per_hop_info {
2010        u64 hop_addr;
2011        u64 hop_pte_addr;
2012        u64 hop_pte_val;
2013};
2014
2015/**
2016 * struct hl_mmu_hop_info - A structure describing the TLB hops and their
2017 * hop-entries that were created in order to translate a virtual address to a
2018 * physical one.
2019 * @scrambled_vaddr: The value of the virtual address after scrambling. This
2020 *                   address replaces the original virtual-address when mapped
2021 *                   in the MMU tables.
2022 * @unscrambled_paddr: The un-scrambled physical address.
2023 * @hop_info: Array holding the per-hop information used for the translation.
2024 * @used_hops: The number of hops used for the translation.
2025 * @range_type: virtual address range type.
2026 */
2027struct hl_mmu_hop_info {
2028        u64 scrambled_vaddr;
2029        u64 unscrambled_paddr;
2030        struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
2031        u32 used_hops;
2032        enum hl_va_range_type range_type;
2033};
2034
2035/**
2036 * struct hl_mmu_funcs - Device related MMU functions.
2037 * @init: initialize the MMU module.
2038 * @fini: release the MMU module.
2039 * @ctx_init: Initialize a context for using the MMU module.
2040 * @ctx_fini: disable a ctx from using the mmu module.
2041 * @map: maps a virtual address to physical address for a context.
2042 * @unmap: unmap a virtual address of a context.
2043 * @flush: flush all writes from all cores to reach device MMU.
2044 * @swap_out: marks all mappings of the given context as swapped out.
2045 * @swap_in: marks all mappings of the given context as swapped in.
2046 * @get_tlb_info: returns the list of hops and hop-entries that were
2047 *                created in order to translate the given virtual address to a
2048 *                physical one.
2049 */
2050struct hl_mmu_funcs {
2051        int (*init)(struct hl_device *hdev);
2052        void (*fini)(struct hl_device *hdev);
2053        int (*ctx_init)(struct hl_ctx *ctx);
2054        void (*ctx_fini)(struct hl_ctx *ctx);
2055        int (*map)(struct hl_ctx *ctx,
2056                        u64 virt_addr, u64 phys_addr, u32 page_size,
2057                        bool is_dram_addr);
2058        int (*unmap)(struct hl_ctx *ctx,
2059                        u64 virt_addr, bool is_dram_addr);
2060        void (*flush)(struct hl_ctx *ctx);
2061        void (*swap_out)(struct hl_ctx *ctx);
2062        void (*swap_in)(struct hl_ctx *ctx);
2063        int (*get_tlb_info)(struct hl_ctx *ctx,
2064                        u64 virt_addr, struct hl_mmu_hop_info *hops);
2065};
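
/*
 * The driver holds one such ops table per page-table location, so common
 * MMU code dispatches by location. An assumed, illustrative call through
 * the device-resident table:
 *
 *      rc = hdev->mmu_func[MMU_DR_PGT].map(ctx, virt_addr, phys_addr,
 *                                      page_size, is_dram_addr);
 */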
2066
2067/**
2068 * struct hl_device - habanalabs device structure.
2069 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
2070 * @pcie_bar_phys: array of available PCIe bars physical addresses.
2071 *                 (required only for PCI address match mode)
2072 * @pcie_bar: array of available PCIe bars virtual addresses.
2073 * @rmmio: configuration area address on SRAM.
2074 * @cdev: related char device.
2075 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
2076 * @dev: related kernel basic device structure.
2077 * @dev_ctrl: related kernel device structure for the control device
2078 * @work_freq: delayed work to lower device frequency if possible.
2079 * @work_heartbeat: delayed work for CPU-CP is-alive check.
2080 * @device_reset_work: delayed work which performs hard reset
2081 * @asic_name: ASIC specific name.
2082 * @asic_type: ASIC specific type.
2083 * @completion_queue: array of hl_cq.
2084 * @user_interrupt: array of hl_user_interrupt. upon the corresponding user
2085 *                  interrupt, driver will monitor the list of fences
2086 *                  registered to this interrupt.
2087 * @common_user_interrupt: common user interrupt for all user interrupts.
2088 *                         upon any user interrupt, driver will monitor the
2089 *                         list of fences registered to this common structure.
2090 * @cq_wq: work queues of completion queues for executing work in process
2091 *         context.
2092 * @eq_wq: work queue of event queue for executing work in process context.
2093 * @sob_reset_wq: work queue for sob reset executions.
2094 * @kernel_ctx: Kernel driver context structure.
2095 * @kernel_queues: array of hl_hw_queue.
2096 * @cs_mirror_list: CS mirror list for TDR.
2097 * @cs_mirror_lock: protects cs_mirror_list.
2098 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
2099 * @event_queue: event queue for IRQ from CPU-CP.
2100 * @dma_pool: DMA pool for small allocations.
2101 * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
2102 * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
2103 * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
2104 * @asid_bitmap: holds used/available ASIDs.
2105 * @asid_mutex: protects asid_bitmap.
2106 * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
2107 * @debug_lock: protects critical section of setting debug mode for device
2108 * @asic_prop: ASIC specific immutable properties.
2109 * @asic_funcs: ASIC specific functions.
2110 * @asic_specific: ASIC specific information to use only from ASIC files.
2111 * @vm: virtual memory manager for MMU.
2112 * @hwmon_dev: H/W monitor device.
2113 * @pm_mng_profile: current power management profile.
2114 * @hl_chip_info: ASIC's sensors information.
2115 * @status: array of device status description strings.
2116 * @hl_debugfs: device's debugfs manager.
2117 * @cb_pool: list of preallocated CBs.
2118 * @cb_pool_lock: protects the CB pool.
2119 * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
2120 * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
2121 * @internal_cb_pool: internal command buffer memory pool.
2122 * @internal_cb_va_base: internal cb pool mmu virtual address base
2123 * @fpriv_list: list of file private data structures. Each structure is created
2124 *              when a user opens the device
2125 * @fpriv_list_lock: protects the fpriv_list
2126 * @compute_ctx: current compute context executing.
2127 * @aggregated_cs_counters: aggregated cs counters among all contexts
2128 * @mmu_priv: device-specific MMU data.
2129 * @mmu_func: device-related MMU functions.
2130 * @fw_loader: FW loader manager.
2131 * @pci_mem_region: array of memory regions in the PCI
2132 * @dram_used_mem: current DRAM memory consumption.
2133 * @timeout_jiffies: device CS timeout value.
2134 * @max_power: the max power of the device, as configured by the sysadmin. This
2135 *             value is saved so in case of hard-reset, the driver will restore
2136 *             this value and update the F/W after the re-initialization
2137 * @clock_gating_mask: is clock gating enabled. bitmask that represents the
2138 *                     different engines. See debugfs-driver-habanalabs for
2139 *                     details.
2140 * @boot_error_status_mask: contains a mask of the device boot error status.
2141 *                          Each bit represents a different error, according to
2142 *                          the defines in hl_boot_if.h. If the bit is cleared,
2143 *                          the error will be ignored by the driver during
2144 *                          device initialization. Mainly used to debug and
2145 *                          workaround firmware bugs
2146 * @last_successful_open_jif: timestamp (jiffies) of the last successful
2147 *                            device open.
2148 * @last_open_session_duration_jif: duration (jiffies) of the last device open
2149 *                                  session.
2150 * @open_counter: number of successful device open operations.
2151 * @in_reset: is device in reset flow.
2152 * @curr_pll_profile: current PLL profile.
2153 * @card_type: Various ASICs have several card types. This indicates the card
2154 *             type of the current device.
2155 * @major: habanalabs kernel driver major.
2156 * @high_pll: high PLL profile frequency.
2157 * @soft_reset_cnt: number of soft reset since the driver was loaded.
2158 * @hard_reset_cnt: number of hard reset since the driver was loaded.
2159 * @clk_throttling_reason: bitmask representing the current clk throttling reasons.
2160 * @id: device minor.
2161 * @id_control: minor of the control device
2162 * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
2163 *                    addresses.
2164 * @disabled: is device disabled.
2165 * @late_init_done: was the late init stage done during initialization.
2166 * @hwmon_initialized: were the H/W monitor sensors initialized.
2167 * @hard_reset_pending: is there hard reset work pending.
2168 * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
2169 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
2170 *                   otherwise.
2171 * @dram_default_page_mapping: is DRAM default page mapping enabled.
2172 * @memory_scrub: true to perform device memory scrub in various locations,
2173 *                such as context-switch, context close, page free, etc.
2174 * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
2175 *                   huge pages.
2176 * @init_done: is the initialization of the device done.
2177 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
2178 * @dma_mask: the dma mask that was set for this device
2179 * @in_debug: is device under debug. This, together with fpriv_list, enforces
2180 *            that only a single user is configuring the debug infrastructure.
2181 * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
2182 *                           only to POWER9 machines.
2183 * @cdev_sysfs_created: were char devices and sysfs nodes created.
2184 * @stop_on_err: true if engines should stop on error.
2185 * @supports_sync_stream: is sync stream supported.
2186 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
2187 * @collective_mon_idx: helper index for collective initialization
2188 * @supports_coresight: is CoreSight supported.
2189 * @supports_soft_reset: is soft reset supported.
2190 * @allow_external_soft_reset: true if soft reset initiated by user or TDR is
2191 *                             allowed.
2192 * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
2193 * @needs_reset: true if reset_on_lockup is false and device should be reset
2194 *               due to lockup.
2195 * @process_kill_trial_cnt: number of trials reset thread tried killing
2196 *                          user processes
2197 * @device_fini_pending: true if device_fini was called and might be
2198 *                       waiting for the reset thread to finish
2199 * @supports_staged_submission: true if staged submissions are supported
2200 * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
2201 *                    triggered, and cleared after it is shared with preboot.
2202 * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
2203 *                         complete instead.
2204 * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
2205 *                        halted. We can't halt it again because the COMMS
2206 *                        protocol will throw an error. Relevant only for
2207 *                        cases where Linux was not loaded to device CPU
2208 */
2209struct hl_device {
2210        struct pci_dev                  *pdev;
2211        u64                             pcie_bar_phys[HL_PCI_NUM_BARS];
2212        void __iomem                    *pcie_bar[HL_PCI_NUM_BARS];
2213        void __iomem                    *rmmio;
2214        struct cdev                     cdev;
2215        struct cdev                     cdev_ctrl;
2216        struct device                   *dev;
2217        struct device                   *dev_ctrl;
2218        struct delayed_work             work_freq;
2219        struct delayed_work             work_heartbeat;
2220        struct hl_device_reset_work     device_reset_work;
2221        char                            asic_name[HL_STR_MAX];
2222        char                            status[HL_DEV_STS_MAX][HL_STR_MAX];
2223        enum hl_asic_type               asic_type;
2224        struct hl_cq                    *completion_queue;
2225        struct hl_user_interrupt        *user_interrupt;
2226        struct hl_user_interrupt        common_user_interrupt;
2227        struct workqueue_struct         **cq_wq;
2228        struct workqueue_struct         *eq_wq;
2229        struct workqueue_struct         *sob_reset_wq;
2230        struct hl_ctx                   *kernel_ctx;
2231        struct hl_hw_queue              *kernel_queues;
2232        struct list_head                cs_mirror_list;
2233        spinlock_t                      cs_mirror_lock;
2234        struct hl_cb_mgr                kernel_cb_mgr;
2235        struct hl_eq                    event_queue;
2236        struct dma_pool                 *dma_pool;
2237        void                            *cpu_accessible_dma_mem;
2238        dma_addr_t                      cpu_accessible_dma_address;
2239        struct gen_pool                 *cpu_accessible_dma_pool;
2240        unsigned long                   *asid_bitmap;
2241        struct mutex                    asid_mutex;
2242        struct mutex                    send_cpu_message_lock;
2243        struct mutex                    debug_lock;
2244        struct asic_fixed_properties    asic_prop;
2245        const struct hl_asic_funcs      *asic_funcs;
2246        void                            *asic_specific;
2247        struct hl_vm                    vm;
2248        struct device                   *hwmon_dev;
2249        enum hl_pm_mng_profile          pm_mng_profile;
2250        struct hwmon_chip_info          *hl_chip_info;
2251
2252        struct hl_dbg_device_entry      hl_debugfs;
2253
2254        struct list_head                cb_pool;
2255        spinlock_t                      cb_pool_lock;
2256
2257        void                            *internal_cb_pool_virt_addr;
2258        dma_addr_t                      internal_cb_pool_dma_addr;
2259        struct gen_pool                 *internal_cb_pool;
2260        u64                             internal_cb_va_base;
2261
2262        struct list_head                fpriv_list;
2263        struct mutex                    fpriv_list_lock;
2264
2265        struct hl_ctx                   *compute_ctx;
2266
2267        struct hl_cs_counters_atomic    aggregated_cs_counters;
2268
2269        struct hl_mmu_priv              mmu_priv;
2270        struct hl_mmu_funcs             mmu_func[MMU_NUM_PGT_LOCATIONS];
2271
2272        struct fw_load_mgr              fw_loader;
2273
2274        struct pci_mem_region           pci_mem_region[PCI_REGION_NUMBER];
2275
2276        atomic64_t                      dram_used_mem;
2277        u64                             timeout_jiffies;
2278        u64                             max_power;
2279        u64                             clock_gating_mask;
2280        u64                             boot_error_status_mask;
2281        u64                             last_successful_open_jif;
2282        u64                             last_open_session_duration_jif;
2283        u64                             open_counter;
2284        atomic_t                        in_reset;
2285        enum hl_pll_frequency           curr_pll_profile;
2286        enum cpucp_card_types           card_type;
2287        u32                             major;
2288        u32                             high_pll;
2289        u32                             soft_reset_cnt;
2290        u32                             hard_reset_cnt;
2291        u32                             clk_throttling_reason;
2292        u16                             id;
2293        u16                             id_control;
2294        u16                             cpu_pci_msb_addr;
2295        u8                              disabled;
2296        u8                              late_init_done;
2297        u8                              hwmon_initialized;
2298        u8                              hard_reset_pending;
2299        u8                              heartbeat;
2300        u8                              reset_on_lockup;
2301        u8                              dram_default_page_mapping;
2302        u8                              memory_scrub;
2303        u8                              pmmu_huge_range;
2304        u8                              init_done;
2305        u8                              device_cpu_disabled;
2306        u8                              dma_mask;
2307        u8                              in_debug;
2308        u8                              power9_64bit_dma_enable;
2309        u8                              cdev_sysfs_created;
2310        u8                              stop_on_err;
2311        u8                              supports_sync_stream;
2312        u8                              sync_stream_queue_idx;
2313        u8                              collective_mon_idx;
2314        u8                              supports_coresight;
2315        u8                              supports_soft_reset;
2316        u8                              allow_external_soft_reset;
2317        u8                              supports_cb_mapping;
2318        u8                              needs_reset;
2319        u8                              process_kill_trial_cnt;
2320        u8                              device_fini_pending;
2321        u8                              supports_staged_submission;
2322        u8                              curr_reset_cause;
2323        u8                              skip_reset_on_timeout;
2324        u8                              device_cpu_is_halted;
2325
2326        /* Parameters for bring-up */
2327        u64                             nic_ports_mask;
2328        u64                             fw_components;
2329        u8                              mmu_enable;
2330        u8                              mmu_huge_page_opt;
2331        u8                              reset_pcilink;
2332        u8                              cpu_queues_enable;
2333        u8                              pldm;
2334        u8                              axi_drain;
2335        u8                              sram_scrambler_enable;
2336        u8                              dram_scrambler_enable;
2337        u8                              hard_reset_on_fw_events;
2338        u8                              bmc_enable;
2339        u8                              rl_enable;
2340        u8                              reset_on_preboot_fail;
2341        u8                              reset_upon_device_release;
2342        u8                              reset_if_device_not_idle;
2343};
2344
2345
2346/*
2347 * IOCTLs
2348 */
2349
2350/**
2351 * typedef hl_ioctl_t - typedef for ioctl function in the driver
2352 * @hpriv: pointer to the FD's private data, which contains state of
2353 *              user process
2354 * @data: pointer to the input/output arguments structure of the IOCTL
2355 *
2356 * Return: 0 for success, negative value for error
2357 */
2358typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
2359
2360/**
2361 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
2362 * @cmd: the IOCTL code as created by the kernel macros.
2363 * @func: pointer to the driver's function that should be called for this IOCTL.
2364 */
2365struct hl_ioctl_desc {
2366        unsigned int cmd;
2367        hl_ioctl_t *func;
2368};
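
/*
 * IOCTL dispatch is table-driven with these descriptors. A hypothetical
 * entry (HL_IOCTL_EXAMPLE and hl_example_ioctl are placeholders, not part
 * of the driver):
 *
 *      static const struct hl_ioctl_desc example_ioctls[] = {
 *              { .cmd = HL_IOCTL_EXAMPLE, .func = hl_example_ioctl },
 *      };
 */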
2369
2370
2371/*
2372 * Kernel module functions that can be accessed by entire module
2373 */
2374
2375/**
2376 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
2377 * @address: The start address of the area we want to validate.
2378 * @size: The size in bytes of the area we want to validate.
2379 * @range_start_address: The start address of the valid range.
2380 * @range_end_address: The end address of the valid range.
2381 *
2382 * Return: true if the area is inside the valid range, false otherwise.
2383 */
2384static inline bool hl_mem_area_inside_range(u64 address, u64 size,
2385                                u64 range_start_address, u64 range_end_address)
2386{
2387        u64 end_address = address + size;
2388
2389        if ((address >= range_start_address) &&
2390                        (end_address <= range_end_address) &&
2391                        (end_address > address))
2392                return true;
2393
2394        return false;
2395}
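
/*
 * For example, hl_mem_area_inside_range(0x1000, 0x2000, 0x0, 0x4000)
 * returns true: the area [0x1000, 0x3000) fits inside [0x0, 0x4000].
 * An (address, size) pair whose sum wraps around U64_MAX fails the
 * end_address > address check and is rejected.
 */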
2396
2397/**
2398 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
2399 * @address: The start address of the area we want to validate.
2400 * @size: The size in bytes of the area we want to validate.
2401 * @range_start_address: The start address of the valid range.
2402 * @range_end_address: The end address of the valid range.
2403 *
2404 * Return: true if the area overlaps part or all of the valid range,
2405 *              false otherwise.
2406 */
2407static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
2408                                u64 range_start_address, u64 range_end_address)
2409{
2410        u64 end_address = address + size;
2411
2412        if ((address >= range_start_address) &&
2413                        (address < range_end_address))
2414                return true;
2415
2416        if ((end_address >= range_start_address) &&
2417                        (end_address < range_end_address))
2418                return true;
2419
2420        if ((address < range_start_address) &&
2421                        (end_address >= range_end_address))
2422                return true;
2423
2424        return false;
2425}
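
/*
 * For example, hl_mem_area_crosses_range(0x3000, 0x2000, 0x0, 0x4000)
 * returns true: the area [0x3000, 0x5000) starts inside the range even
 * though it ends beyond it. The three checks cover an area that starts
 * inside, ends inside, or fully engulfs the range.
 */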

int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
                enum hl_device_status *status);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
                enum hl_asic_type asic_type, int minor);
void destroy_hdev(struct hl_device *hdev);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
                                u32 cb_size, u64 cb_ptr);
int hl_hw_queue_schedule_cs(struct hl_cs *cs);
u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
void hl_hw_queue_update_ci(struct hl_cs *cs);
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);

#define hl_queue_inc_ptr(p)             hl_hw_queue_add_ptr(p, 1)
#define hl_pi_2_offset(pi)              ((pi) & (HL_QUEUE_LENGTH - 1))
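
/*
 * Example (HL_QUEUE_LENGTH is assumed here to be a power of two, e.g.
 * 4096): hl_pi_2_offset(4101) == 5, i.e. the macro implements a cheap
 * modulo that wraps a monotonically increasing producer index onto a
 * ring-buffer slot.
 */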

int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_default(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);

int hl_asid_init(struct hl_device *hdev);
void hl_asid_fini(struct hl_device *hdev);
unsigned long hl_asid_alloc(struct hl_device *hdev);
void hl_asid_free(struct hl_device *hdev, unsigned long asid);

int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);

int hl_device_init(struct hl_device *hdev, struct class *hclass);
void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, u32 flags);
void hl_hpriv_get(struct hl_fpriv *hpriv);
int hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
int hl_device_utilization(struct hl_device *hdev, u32 *utilization);

int hl_build_hwmon_channel_info(struct hl_device *hdev,
                struct cpucp_sensor *sensors_arr);

int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);

int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                        struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
                        bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                        u32 handle);
void hl_cb_put(struct hl_cb *cb);
void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
                                        bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);

void hl_cs_rollback_all(struct hl_device *hdev);
void hl_pending_cb_list_flush(struct hl_ctx *ctx);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
                enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
void hl_fence_put(struct hl_fence *fence);
void hl_fence_get(struct hl_fence *fence);
void cs_get(struct hl_cs *cs);
bool cs_needs_completion(struct hl_cs *cs);
bool cs_needs_timeout(struct hl_cs *cs);
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);

void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);

int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);

int hl_vm_init(struct hl_device *hdev);
void hl_vm_fini(struct hl_device *hdev);

void hl_hw_block_mem_init(struct hl_ctx *ctx);
void hl_hw_block_mem_fini(struct hl_ctx *ctx);

u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
                enum hl_va_range_type type, u32 size, u32 alignment);
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
                u64 start_addr, u64 size);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
                        struct hl_userptr *userptr);
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
                                struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
                                struct list_head *userptr_list,
                                struct hl_userptr **userptr);
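
/*
 * Illustrative pairing (the local variable and error handling are
 * assumptions of this comment, not driver requirements): pin a user
 * buffer before DMA and unpin it once the transfer has completed.
 *
 *   struct hl_userptr userptr = {};
 *   int rc;
 *
 *   rc = hl_pin_host_memory(hdev, user_addr, user_size, &userptr);
 *   if (rc)
 *           return rc;
 *   ...use the pinned memory for DMA...
 *   hl_unpin_host_memory(hdev, &userptr);
 */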

int hl_mmu_init(struct hl_device *hdev);
void hl_mmu_fini(struct hl_device *hdev);
int hl_mmu_ctx_init(struct hl_ctx *ctx);
void hl_mmu_ctx_fini(struct hl_ctx *ctx);
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
                u32 page_size, bool flush_pte);
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
                bool flush_pte);
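
/*
 * Example (illustrative sketch, error handling omitted): when mapping a
 * batch of pages, flush_pte can be left false for all but the last call
 * so the page-table cache is flushed once per batch rather than once per
 * page, e.g.:
 *
 *   for (i = 0 ; i < npages ; i++)
 *           hl_mmu_map_page(ctx, va + i * page_size,
 *                           pa + i * page_size, page_size,
 *                           i == npages - 1);
 */
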
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
                                        u64 phys_addr, u32 size);
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                        struct hl_mmu_hop_info *hops);
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);

int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
                                void __iomem *dst, u32 src_offset, u32 size);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
                                u16 len, u32 timeout, u64 *result);
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
                size_t irq_arr_size);
int hl_fw_test_cpu_queue(struct hl_device *hdev);
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
                                                dma_addr_t *dma_handle);
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
                                        void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_cpucp_info_get(struct hl_device *hdev,
                                u32 sts_boot_dev_sts0_reg,
                                u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
                                u32 boot_err1_reg);
int hl_fw_cpucp_handshake(struct hl_device *hdev,
                                u32 sts_boot_dev_sts0_reg,
                                u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
                                u32 boot_err1_reg);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
                struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
                        u64 *total_energy);
int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
                                                enum pll_index *pll_index);
int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
                u16 *pll_freq_arr);
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
int hl_fw_init_cpu(struct hl_device *hdev);
int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
                                u32 sts_boot_dev_sts0_reg,
                                u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
                                u32 boot_err1_reg, u32 timeout);
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
                                struct fw_load_mgr *fw_loader,
                                enum comms_cmd cmd, unsigned int size,
                                bool wait_ok, u32 timeout);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
                        bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
                struct hl_inbound_pci_region *pci_region);
int hl_pci_set_outbound_region(struct hl_device *hdev,
                struct hl_outbound_pci_region *pci_region);
enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr);
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);

long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
int hl_get_temperature(struct hl_device *hdev,
                       int sensor_index, u32 attr, long *value);
int hl_set_temperature(struct hl_device *hdev,
                       int sensor_index, u32 attr, long value);
int hl_get_voltage(struct hl_device *hdev,
                   int sensor_index, u32 attr, long *value);
int hl_get_current(struct hl_device *hdev,
                   int sensor_index, u32 attr, long *value);
int hl_get_fan_speed(struct hl_device *hdev,
                     int sensor_index, u32 attr, long *value);
int hl_get_pwm_info(struct hl_device *hdev,
                    int sensor_index, u32 attr, long *value);
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
                        long value);
u64 hl_get_max_power(struct hl_device *hdev);
void hl_set_max_power(struct hl_device *hdev);
int hl_set_voltage(struct hl_device *hdev,
                        int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
                        int sensor_index, u32 attr, long value);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
                        struct hl_hw_sob **hw_sob, u32 count);

#ifdef CONFIG_DEBUG_FS

void hl_debugfs_init(void);
void hl_debugfs_fini(void);
void hl_debugfs_add_device(struct hl_device *hdev);
void hl_debugfs_remove_device(struct hl_device *hdev);
void hl_debugfs_add_file(struct hl_fpriv *hpriv);
void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
void hl_debugfs_add_cb(struct hl_cb *cb);
void hl_debugfs_remove_cb(struct hl_cb *cb);
void hl_debugfs_add_cs(struct hl_cs *cs);
void hl_debugfs_remove_cs(struct hl_cs *cs);
void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_debugfs_remove_userptr(struct hl_device *hdev,
                                struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);

#else

static inline void __init hl_debugfs_init(void)
{
}

static inline void hl_debugfs_fini(void)
{
}

static inline void hl_debugfs_add_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_remove_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_add_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_add_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_add_job(struct hl_device *hdev,
                                        struct hl_cs_job *job)
{
}

static inline void hl_debugfs_remove_job(struct hl_device *hdev,
                                        struct hl_cs_job *job)
{
}

static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
                                        struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
                                        struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
                                        struct hl_ctx *ctx)
{
}

static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
                                        struct hl_ctx *ctx)
{
}

#endif

/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);

#endif /* HABANALABSP_H_ */