linux/drivers/misc/habanalabs/habanalabs.h
   1/* SPDX-License-Identifier: GPL-2.0
   2 *
   3 * Copyright 2016-2019 HabanaLabs, Ltd.
   4 * All Rights Reserved.
   5 *
   6 */
   7
   8#ifndef HABANALABSP_H_
   9#define HABANALABSP_H_
  10
  11#include "include/armcp_if.h"
  12#include "include/qman_if.h"
  13
  14#include <linux/cdev.h>
  15#include <linux/iopoll.h>
  16#include <linux/irqreturn.h>
  17#include <linux/dma-fence.h>
  18#include <linux/dma-direction.h>
  19#include <linux/scatterlist.h>
  20#include <linux/hashtable.h>
  21
  22#define HL_NAME                         "habanalabs"
  23
  24#define HL_MMAP_CB_MASK                 (0x8000000000000000ull >> PAGE_SHIFT)
  25
  26#define HL_PENDING_RESET_PER_SEC        5
  27
  28#define HL_DEVICE_TIMEOUT_USEC          1000000 /* 1 s */
  29
  30#define HL_HEARTBEAT_PER_USEC           5000000 /* 5 s */
  31
  32#define HL_PLL_LOW_JOB_FREQ_USEC        5000000 /* 5 s */
  33
   34#define HL_ARMCP_INFO_TIMEOUT_USEC      10000000 /* 10 s */
   35#define HL_ARMCP_EEPROM_TIMEOUT_USEC    10000000 /* 10 s */
   36
   37#define HL_PCI_ELBI_TIMEOUT_MSEC        10 /* 10 ms */
  38
  39#define HL_MAX_QUEUES                   128
  40
  41#define HL_MAX_JOBS_PER_CS              64
  42
  43/* MUST BE POWER OF 2 and larger than 1 */
  44#define HL_MAX_PENDING_CS               64
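/*
 * Example (illustrative, not taken verbatim from the driver): because
 * HL_MAX_PENDING_CS is a power of 2, a CS sequence number is turned into
 * an index in the pending-CS array with a cheap mask instead of a modulo:
 *
 *	struct dma_fence *fence =
 *		ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)];
 */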
  45
  46/* Memory */
  47#define MEM_HASH_TABLE_BITS             7 /* 1 << 7 buckets */
  48
  49/* MMU */
  50#define MMU_HASH_TABLE_BITS             7 /* 1 << 7 buckets */
  51
  52/**
  53 * struct pgt_info - MMU hop page info.
   54 * @node: hash linked-list node for the shadow hash of pgts.
  55 * @phys_addr: physical address of the pgt.
  56 * @shadow_addr: shadow hop in the host.
  57 * @ctx: pointer to the owner ctx.
  58 * @num_of_ptes: indicates how many ptes are used in the pgt.
  59 *
  60 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
  61 * is needed during mapping, a new page is allocated and this structure holds
   62 * its essential information. During unmapping, if no valid PTEs remain in the
   63 * page, it is freed together with its pgt_info structure.
  64 */
  65struct pgt_info {
  66        struct hlist_node       node;
  67        u64                     phys_addr;
  68        u64                     shadow_addr;
  69        struct hl_ctx           *ctx;
  70        int                     num_of_ptes;
  71};
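/*
 * Illustrative sketch (hypothetical helper names): the lifecycle described
 * above amounts to reference counting the valid PTEs of a hop. Clearing a
 * PTE could then look like:
 *
 *	static void put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info)
 *	{
 *		// the hop page is freed once its last valid PTE is cleared
 *		if (--pgt_info->num_of_ptes == 0)
 *			free_hop(ctx, pgt_info);	// hypothetical helper
 *	}
 */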
  72
  73struct hl_device;
  74struct hl_fpriv;
  75
  76/**
  77 * enum hl_queue_type - Supported QUEUE types.
  78 * @QUEUE_TYPE_NA: queue is not available.
  79 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
  80 *                  host.
  81 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
  82 *                      memories and/or operates the compute engines.
  83 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
  84 */
  85enum hl_queue_type {
  86        QUEUE_TYPE_NA,
  87        QUEUE_TYPE_EXT,
  88        QUEUE_TYPE_INT,
  89        QUEUE_TYPE_CPU
  90};
  91
  92/**
  93 * struct hw_queue_properties - queue information.
  94 * @type: queue type.
  95 * @kmd_only: true if only KMD is allowed to send a job to this queue, false
  96 *            otherwise.
  97 */
  98struct hw_queue_properties {
  99        enum hl_queue_type      type;
 100        u8                      kmd_only;
 101};
 102
 103/**
 104 * enum vm_type_t - virtual memory mapping request information.
 105 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
 106 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
 107 */
 108enum vm_type_t {
 109        VM_TYPE_USERPTR,
 110        VM_TYPE_PHYS_PACK
 111};
 112
 113/**
  114 * enum hl_device_hw_state - H/W device state. Use this to understand whether
  115 *                           to do reset before hw_init or not.
 116 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
 117 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
 118 *                            hw_init
 119 */
 120enum hl_device_hw_state {
 121        HL_DEVICE_HW_STATE_CLEAN = 0,
 122        HL_DEVICE_HW_STATE_DIRTY
 123};
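/*
 * Example (illustrative sketch): a typical consumer of this state is the
 * initialization path, which performs a hard reset before hw_init if the
 * previous driver instance left the H/W dirty:
 *
 *	if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY)
 *		hard_reset_device(hdev);	// hypothetical helper
 *	rc = hdev->asic_funcs->hw_init(hdev);
 */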
 124
 125/**
 126 * struct asic_fixed_properties - ASIC specific immutable properties.
 127 * @hw_queues_props: H/W queues properties.
  128 * @armcp_info: various information received from ArmCP regarding the H/W,
  129 *              e.g. available sensors.
 130 * @uboot_ver: F/W U-boot version.
 131 * @preboot_ver: F/W Preboot version.
 132 * @sram_base_address: SRAM physical start address.
 133 * @sram_end_address: SRAM physical end address.
  134 * @sram_user_base_address: SRAM physical start address for user access.
 135 * @dram_base_address: DRAM physical start address.
 136 * @dram_end_address: DRAM physical end address.
 137 * @dram_user_base_address: DRAM physical start address for user access.
 138 * @dram_size: DRAM total size.
 139 * @dram_pci_bar_size: size of PCI bar towards DRAM.
 140 * @max_power_default: max power of the device after reset
 141 * @va_space_host_start_address: base address of virtual memory range for
 142 *                               mapping host memory.
 143 * @va_space_host_end_address: end address of virtual memory range for
 144 *                             mapping host memory.
 145 * @va_space_dram_start_address: base address of virtual memory range for
 146 *                               mapping DRAM memory.
 147 * @va_space_dram_end_address: end address of virtual memory range for
 148 *                             mapping DRAM memory.
 149 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
 150 *                                      fault.
 151 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
 152 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
 153 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
 154 * @mmu_dram_default_page_addr: DRAM default page physical address.
 155 * @mmu_pgt_size: MMU page tables total size.
 156 * @mmu_pte_size: PTE size in MMU page tables.
 157 * @mmu_hop_table_size: MMU hop table size.
 158 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
 159 * @dram_page_size: page size for MMU DRAM allocation.
 160 * @cfg_size: configuration space size on SRAM.
 161 * @sram_size: total size of SRAM.
 162 * @max_asid: maximum number of open contexts (ASIDs).
 163 * @num_of_events: number of possible internal H/W IRQs.
 164 * @psoc_pci_pll_nr: PCI PLL NR value.
 165 * @psoc_pci_pll_nf: PCI PLL NF value.
 166 * @psoc_pci_pll_od: PCI PLL OD value.
 167 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
 168 * @completion_queues_count: number of completion queues.
 169 * @high_pll: high PLL frequency used by the device.
 170 * @cb_pool_cb_cnt: number of CBs in the CB pool.
 171 * @cb_pool_cb_size: size of each CB in the CB pool.
 172 * @tpc_enabled_mask: which TPCs are enabled.
 173 */
 174struct asic_fixed_properties {
 175        struct hw_queue_properties      hw_queues_props[HL_MAX_QUEUES];
 176        struct armcp_info       armcp_info;
 177        char                    uboot_ver[VERSION_MAX_LEN];
 178        char                    preboot_ver[VERSION_MAX_LEN];
 179        u64                     sram_base_address;
 180        u64                     sram_end_address;
 181        u64                     sram_user_base_address;
 182        u64                     dram_base_address;
 183        u64                     dram_end_address;
 184        u64                     dram_user_base_address;
 185        u64                     dram_size;
 186        u64                     dram_pci_bar_size;
 187        u64                     max_power_default;
 188        u64                     va_space_host_start_address;
 189        u64                     va_space_host_end_address;
 190        u64                     va_space_dram_start_address;
 191        u64                     va_space_dram_end_address;
 192        u64                     dram_size_for_default_page_mapping;
 193        u64                     pcie_dbi_base_address;
 194        u64                     pcie_aux_dbi_reg_addr;
 195        u64                     mmu_pgt_addr;
 196        u64                     mmu_dram_default_page_addr;
 197        u32                     mmu_pgt_size;
 198        u32                     mmu_pte_size;
 199        u32                     mmu_hop_table_size;
 200        u32                     mmu_hop0_tables_total_size;
 201        u32                     dram_page_size;
 202        u32                     cfg_size;
 203        u32                     sram_size;
 204        u32                     max_asid;
 205        u32                     num_of_events;
 206        u32                     psoc_pci_pll_nr;
 207        u32                     psoc_pci_pll_nf;
 208        u32                     psoc_pci_pll_od;
 209        u32                     psoc_pci_pll_div_factor;
 210        u32                     high_pll;
 211        u32                     cb_pool_cb_cnt;
 212        u32                     cb_pool_cb_size;
 213        u8                      completion_queues_count;
 214        u8                      tpc_enabled_mask;
 215};
 216
 217/**
 218 * struct hl_dma_fence - wrapper for fence object used by command submissions.
 219 * @base_fence: kernel fence object.
 220 * @lock: spinlock to protect fence.
 221 * @hdev: habanalabs device structure.
 222 * @cs_seq: command submission sequence number.
 223 */
 224struct hl_dma_fence {
 225        struct dma_fence        base_fence;
 226        spinlock_t              lock;
 227        struct hl_device        *hdev;
 228        u64                     cs_seq;
 229};
 230
 231/*
 232 * Command Buffers
 233 */
 234
 235#define HL_MAX_CB_SIZE          0x200000        /* 2MB */
 236
 237/**
 238 * struct hl_cb_mgr - describes a Command Buffer Manager.
 239 * @cb_lock: protects cb_handles.
 240 * @cb_handles: an idr to hold all command buffer handles.
 241 */
 242struct hl_cb_mgr {
 243        spinlock_t              cb_lock;
 244        struct idr              cb_handles; /* protected by cb_lock */
 245};
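/*
 * Example (illustrative sketch, error handling elided): allocating a
 * handle for a new CB takes cb_lock around the idr:
 *
 *	spin_lock(&mgr->cb_lock);
 *	handle = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
 *	spin_unlock(&mgr->cb_lock);
 *	if (handle < 0)
 *		return handle;	// no free handle or out of memory
 */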
 246
 247/**
 248 * struct hl_cb - describes a Command Buffer.
 249 * @refcount: reference counter for usage of the CB.
 250 * @hdev: pointer to device this CB belongs to.
 251 * @lock: spinlock to protect mmap/cs flows.
 252 * @debugfs_list: node in debugfs list of command buffers.
 253 * @pool_list: node in pool list of command buffers.
 254 * @kernel_address: Holds the CB's kernel virtual address.
 255 * @bus_address: Holds the CB's DMA address.
 256 * @mmap_size: Holds the CB's size that was mmaped.
 257 * @size: holds the CB's size.
 258 * @id: the CB's ID.
 259 * @cs_cnt: holds number of CS that this CB participates in.
 260 * @ctx_id: holds the ID of the owner's context.
 261 * @mmap: true if the CB is currently mmaped to user.
 262 * @is_pool: true if CB was acquired from the pool, false otherwise.
 263 */
 264struct hl_cb {
 265        struct kref             refcount;
 266        struct hl_device        *hdev;
 267        spinlock_t              lock;
 268        struct list_head        debugfs_list;
 269        struct list_head        pool_list;
 270        u64                     kernel_address;
 271        dma_addr_t              bus_address;
 272        u32                     mmap_size;
 273        u32                     size;
 274        u32                     id;
 275        u32                     cs_cnt;
 276        u32                     ctx_id;
 277        u8                      mmap;
 278        u8                      is_pool;
 279};
 280
 281
 282/*
 283 * QUEUES
 284 */
 285
 286struct hl_cs_job;
 287
 288/*
 289 * Currently, there are two limitations on the maximum length of a queue:
 290 *
 291 * 1. The memory footprint of the queue. The current allocated space for the
 292 *    queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
 293 *    the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
 294 *    which currently is 4096/16 = 256 entries.
 295 *
 296 *    To increase that, we need either to decrease the size of the
 297 *    BD (difficult), or allocate more than a single page (easier).
 298 *
 299 * 2. Because the size of the JOB handle field in the BD CTL / completion queue
  300 *    is 10 bits, we can have up to 1024 open jobs per hardware queue.
 301 *    Therefore, each queue can hold up to 1024 entries.
 302 *
 303 * HL_QUEUE_LENGTH is in units of struct hl_bd.
 304 * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
 305 */
 306
 307#define HL_PAGE_SIZE                    4096 /* minimum page size */
 308/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
 309#define HL_QUEUE_LENGTH                 256
 310#define HL_QUEUE_SIZE_IN_BYTES          (HL_QUEUE_LENGTH * HL_BD_SIZE)
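/*
 * Illustrative sketch (not part of the original header): both relations
 * above can be enforced at compile time from any always-built function:
 *
 *	BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);
 *	BUILD_BUG_ON(HL_QUEUE_LENGTH & (HL_QUEUE_LENGTH - 1)); // power of 2
 */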
 311
 312/*
 313 * HL_CQ_LENGTH is in units of struct hl_cq_entry.
 314 * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
 315 */
 316#define HL_CQ_LENGTH                    HL_QUEUE_LENGTH
 317#define HL_CQ_SIZE_IN_BYTES             (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
 318
 319/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
 320#define HL_EQ_LENGTH                    64
 321#define HL_EQ_SIZE_IN_BYTES             (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
 322
 323/* KMD <-> ArmCP shared memory size */
 324#define HL_CPU_ACCESSIBLE_MEM_SIZE      SZ_2M
 325
 326/**
 327 * struct hl_hw_queue - describes a H/W transport queue.
 328 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
 329 * @queue_type: type of queue.
 330 * @kernel_address: holds the queue's kernel virtual address.
 331 * @bus_address: holds the queue's DMA address.
 332 * @pi: holds the queue's pi value.
 333 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
 334 * @hw_queue_id: the id of the H/W queue.
 335 * @int_queue_len: length of internal queue (number of entries).
  336 * @valid: is the queue valid (we have an array of HL_MAX_QUEUES queues, not
  337 *              all of them exist).
 338 */
 339struct hl_hw_queue {
 340        struct hl_cs_job        **shadow_queue;
 341        enum hl_queue_type      queue_type;
 342        u64                     kernel_address;
 343        dma_addr_t              bus_address;
 344        u32                     pi;
 345        u32                     ci;
 346        u32                     hw_queue_id;
 347        u16                     int_queue_len;
 348        u8                      valid;
 349};
 350
 351/**
 352 * struct hl_cq - describes a completion queue
 353 * @hdev: pointer to the device structure
 354 * @kernel_address: holds the queue's kernel virtual address
 355 * @bus_address: holds the queue's DMA address
 356 * @hw_queue_id: the id of the matching H/W queue
 357 * @ci: ci inside the queue
 358 * @pi: pi inside the queue
 359 * @free_slots_cnt: counter of free slots in queue
 360 */
 361struct hl_cq {
 362        struct hl_device        *hdev;
 363        u64                     kernel_address;
 364        dma_addr_t              bus_address;
 365        u32                     hw_queue_id;
 366        u32                     ci;
 367        u32                     pi;
 368        atomic_t                free_slots_cnt;
 369};
 370
 371/**
 372 * struct hl_eq - describes the event queue (single one per device)
 373 * @hdev: pointer to the device structure
 374 * @kernel_address: holds the queue's kernel virtual address
 375 * @bus_address: holds the queue's DMA address
 376 * @ci: ci inside the queue
 377 */
 378struct hl_eq {
 379        struct hl_device        *hdev;
 380        u64                     kernel_address;
 381        dma_addr_t              bus_address;
 382        u32                     ci;
 383};
 384
 385
 386/*
 387 * ASICs
 388 */
 389
 390/**
 391 * enum hl_asic_type - supported ASIC types.
 392 * @ASIC_INVALID: Invalid ASIC type.
 393 * @ASIC_GOYA: Goya device.
 394 */
 395enum hl_asic_type {
 396        ASIC_INVALID,
 397        ASIC_GOYA
 398};
 399
 400struct hl_cs_parser;
 401
 402/**
 403 * enum hl_pm_mng_profile - power management profile.
 404 * @PM_AUTO: internal clock is set by KMD.
 405 * @PM_MANUAL: internal clock is set by the user.
 406 * @PM_LAST: last power management type.
 407 */
 408enum hl_pm_mng_profile {
 409        PM_AUTO = 1,
 410        PM_MANUAL,
 411        PM_LAST
 412};
 413
 414/**
 415 * enum hl_pll_frequency - PLL frequency.
 416 * @PLL_HIGH: high frequency.
 417 * @PLL_LOW: low frequency.
 418 * @PLL_LAST: last frequency values that were configured by the user.
 419 */
 420enum hl_pll_frequency {
 421        PLL_HIGH = 1,
 422        PLL_LOW,
 423        PLL_LAST
 424};
 425
 426/**
  427 * struct hl_asic_funcs - ASIC specific functions that can be called from
 428 *                        common code.
 429 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
 430 * @early_fini: tears down what was done in early_init.
 431 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
 432 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
 433 * @sw_init: sets up driver state, does not configure H/W.
 434 * @sw_fini: tears down driver state, does not configure H/W.
 435 * @hw_init: sets up the H/W state.
 436 * @hw_fini: tears down the H/W state.
 437 * @halt_engines: halt engines, needed for reset sequence. This also disables
 438 *                interrupts from the device. Should be called before
 439 *                hw_fini and before CS rollback.
 440 * @suspend: handles IP specific H/W or SW changes for suspend.
 441 * @resume: handles IP specific H/W or SW changes for resume.
 442 * @cb_mmap: maps a CB.
 443 * @ring_doorbell: increment PI on a given QMAN.
 444 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
 445 *             function because the PQs are located in different memory areas
 446 *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
 447 *             writing the PQE must match the destination memory area
 448 *             properties.
 449 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
  450 *                           dma_alloc_coherent(). This is an ASIC function
  451 *                           because its implementation is not trivial when
  452 *                           the driver is loaded in simulation mode (not upstreamed).
  453 * @asic_dma_free_coherent:  Free coherent DMA memory by calling
  454 *                           dma_free_coherent(). This is an ASIC function
  455 *                           because its implementation is not trivial when
  456 *                           the driver is loaded in simulation mode (not upstreamed).
 457 * @get_int_queue_base: get the internal queue base address.
 458 * @test_queues: run simple test on all queues for sanity check.
 459 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
 460 *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
 461 * @asic_dma_pool_free: free small DMA allocation from pool.
 462 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
 463 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
 464 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
 465 * @cs_parser: parse Command Submission.
 466 * @asic_dma_map_sg: DMA map scatter-gather list.
 467 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
 468 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
 469 * @update_eq_ci: update event queue CI.
 470 * @context_switch: called upon ASID context switch.
  471 * @restore_phase_topology: clear all SOBs and MONs.
 472 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
 473 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
 474 * @add_device_attr: add ASIC specific device attributes.
 475 * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
 476 * @set_pll_profile: change PLL profile (manual/automatic).
 477 * @get_events_stat: retrieve event queue entries histogram.
 478 * @read_pte: read MMU page table entry from DRAM.
 479 * @write_pte: write MMU page table entry to DRAM.
 480 * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
 481 *                        hard (L0 & L1) flush.
 482 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
 483 *                              ASID-VA-size mask.
 484 * @send_heartbeat: send is-alive packet to ArmCP and verify response.
 485 * @debug_coresight: perform certain actions on Coresight for debugging.
 486 * @is_device_idle: return true if device is idle, false otherwise.
 487 * @soft_reset_late_init: perform certain actions needed after soft reset.
 488 * @hw_queues_lock: acquire H/W queues lock.
 489 * @hw_queues_unlock: release H/W queues lock.
 490 * @get_pci_id: retrieve PCI ID.
 491 * @get_eeprom_data: retrieve EEPROM data from F/W.
 492 * @send_cpu_message: send buffer to ArmCP.
 493 * @get_hw_state: retrieve the H/W state
 494 * @pci_bars_map: Map PCI BARs.
  495 * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
  496 *                     the old address the bar pointed to or U64_MAX on failure.
 497 * @init_iatu: Initialize the iATU unit inside the PCI controller.
 498 * @rreg: Read a register. Needed for simulator support.
 499 * @wreg: Write a register. Needed for simulator support.
 500 * @halt_coresight: stop the ETF and ETR traces.
 501 */
 502struct hl_asic_funcs {
 503        int (*early_init)(struct hl_device *hdev);
 504        int (*early_fini)(struct hl_device *hdev);
 505        int (*late_init)(struct hl_device *hdev);
 506        void (*late_fini)(struct hl_device *hdev);
 507        int (*sw_init)(struct hl_device *hdev);
 508        int (*sw_fini)(struct hl_device *hdev);
 509        int (*hw_init)(struct hl_device *hdev);
 510        void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
 511        void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
 512        int (*suspend)(struct hl_device *hdev);
 513        int (*resume)(struct hl_device *hdev);
 514        int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
 515                        u64 kaddress, phys_addr_t paddress, u32 size);
 516        void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
 517        void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
 518                        struct hl_bd *bd);
 519        void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
 520                                        dma_addr_t *dma_handle, gfp_t flag);
 521        void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
 522                                        void *cpu_addr, dma_addr_t dma_handle);
 523        void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
 524                                dma_addr_t *dma_handle, u16 *queue_len);
 525        int (*test_queues)(struct hl_device *hdev);
 526        void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
 527                                gfp_t mem_flags, dma_addr_t *dma_handle);
 528        void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
 529                                dma_addr_t dma_addr);
 530        void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
 531                                size_t size, dma_addr_t *dma_handle);
 532        void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
 533                                size_t size, void *vaddr);
 534        void (*hl_dma_unmap_sg)(struct hl_device *hdev,
 535                                struct scatterlist *sgl, int nents,
 536                                enum dma_data_direction dir);
 537        int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
 538        int (*asic_dma_map_sg)(struct hl_device *hdev,
 539                                struct scatterlist *sgl, int nents,
 540                                enum dma_data_direction dir);
 541        u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
 542                                        struct sg_table *sgt);
 543        void (*add_end_of_cb_packets)(struct hl_device *hdev,
 544                                        u64 kernel_address, u32 len,
 545                                        u64 cq_addr, u32 cq_val, u32 msix_num);
 546        void (*update_eq_ci)(struct hl_device *hdev, u32 val);
 547        int (*context_switch)(struct hl_device *hdev, u32 asid);
 548        void (*restore_phase_topology)(struct hl_device *hdev);
 549        int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
 550        int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
 551        void (*add_device_attr)(struct hl_device *hdev,
 552                                struct attribute_group *dev_attr_grp);
 553        void (*handle_eqe)(struct hl_device *hdev,
 554                                struct hl_eq_entry *eq_entry);
 555        void (*set_pll_profile)(struct hl_device *hdev,
 556                        enum hl_pll_frequency freq);
 557        void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
 558        u64 (*read_pte)(struct hl_device *hdev, u64 addr);
 559        void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
 560        void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
 561        void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
 562                        u32 asid, u64 va, u64 size);
 563        int (*send_heartbeat)(struct hl_device *hdev);
 564        int (*debug_coresight)(struct hl_device *hdev, void *data);
 565        bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
 566                                struct seq_file *s);
 567        int (*soft_reset_late_init)(struct hl_device *hdev);
 568        void (*hw_queues_lock)(struct hl_device *hdev);
 569        void (*hw_queues_unlock)(struct hl_device *hdev);
 570        u32 (*get_pci_id)(struct hl_device *hdev);
 571        int (*get_eeprom_data)(struct hl_device *hdev, void *data,
 572                                size_t max_size);
 573        int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
 574                                u16 len, u32 timeout, long *result);
 575        enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
 576        int (*pci_bars_map)(struct hl_device *hdev);
 577        u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
 578        int (*init_iatu)(struct hl_device *hdev);
 579        u32 (*rreg)(struct hl_device *hdev, u32 reg);
 580        void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
 581        void (*halt_coresight)(struct hl_device *hdev);
 582};
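/*
 * Example (illustrative; the goya_* names are stand-ins for an ASIC
 * back-end's implementations): each supported ASIC exposes a constant
 * instance of this table, which common code reaches via hdev->asic_funcs:
 *
 *	static const struct hl_asic_funcs goya_funcs = {
 *		.early_init = goya_early_init,
 *		.early_fini = goya_early_fini,
 *		.hw_init = goya_hw_init,
 *		.hw_fini = goya_hw_fini,
 *		.ring_doorbell = goya_ring_doorbell,
 *		// ... remaining ops follow the same pattern
 *	};
 */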
 583
 584
 585/*
 586 * CONTEXTS
 587 */
 588
 589#define HL_KERNEL_ASID_ID       0
 590
 591/**
 592 * struct hl_va_range - virtual addresses range.
 593 * @lock: protects the virtual addresses list.
 594 * @list: list of virtual addresses blocks available for mappings.
 595 * @start_addr: range start address.
 596 * @end_addr: range end address.
 597 */
 598struct hl_va_range {
 599        struct mutex            lock;
 600        struct list_head        list;
 601        u64                     start_addr;
 602        u64                     end_addr;
 603};
 604
 605/**
 606 * struct hl_ctx - user/kernel context.
 607 * @mem_hash: holds mapping from virtual address to virtual memory area
  608 *              descriptor (hl_vm_phys_pg_pack or hl_userptr).
 609 * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
 610 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
 611 * @hpriv: pointer to the private (KMD) data of the process (fd).
 612 * @hdev: pointer to the device structure.
  613 * @refcount: reference counter for the context. Context is released only when
  614 *              this hits 0. It is incremented on CS and CS_WAIT.
 615 * @cs_pending: array of DMA fence objects representing pending CS.
 616 * @host_va_range: holds available virtual addresses for host mappings.
 617 * @dram_va_range: holds available virtual addresses for DRAM mappings.
 618 * @mem_hash_lock: protects the mem_hash.
  619 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying
  620 *            the MMU hash or walking the PGT requires taking this lock.
 621 * @debugfs_list: node in debugfs list of contexts.
  622 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
  623 *                      to the user so the user can inquire about the CS. It is
  624 *                      used as an index into the cs_pending array.
 625 * @dram_default_hops: array that holds all hops addresses needed for default
 626 *                     DRAM mapping.
 627 * @cs_lock: spinlock to protect cs_sequence.
 628 * @dram_phys_mem: amount of used physical DRAM memory by this context.
 629 * @thread_ctx_switch_token: token to prevent multiple threads of the same
 630 *                              context from running the context switch phase.
 631 *                              Only a single thread should run it.
 632 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
 633 *                              the context switch phase from moving to their
 634 *                              execution phase before the context switch phase
 635 *                              has finished.
 636 * @asid: context's unique address space ID in the device's MMU.
 637 */
 638struct hl_ctx {
 639        DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
 640        DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
 641        DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
 642        struct hl_fpriv         *hpriv;
 643        struct hl_device        *hdev;
 644        struct kref             refcount;
 645        struct dma_fence        *cs_pending[HL_MAX_PENDING_CS];
 646        struct hl_va_range      host_va_range;
 647        struct hl_va_range      dram_va_range;
 648        struct mutex            mem_hash_lock;
 649        struct mutex            mmu_lock;
 650        struct list_head        debugfs_list;
 651        u64                     cs_sequence;
 652        u64                     *dram_default_hops;
 653        spinlock_t              cs_lock;
 654        atomic64_t              dram_phys_mem;
 655        atomic_t                thread_ctx_switch_token;
 656        u32                     thread_ctx_switch_wait_token;
 657        u32                     asid;
 658};
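/*
 * Illustrative sketch (simplified relative to the real flow): the two
 * context-switch tokens implement a "first thread does the work, the rest
 * wait" scheme in the submission path:
 *
 *	if (atomic_cmpxchg(&ctx->thread_ctx_switch_token, 0, 1) == 0) {
 *		run_context_switch(ctx);		// hypothetical helper
 *		ctx->thread_ctx_switch_wait_token = 1;	// release waiters
 *	} else {
 *		// wait for the winning thread to finish the switch phase
 *		while (!ctx->thread_ctx_switch_wait_token)
 *			usleep_range(50, 100);
 *	}
 */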
 659
 660/**
 661 * struct hl_ctx_mgr - for handling multiple contexts.
 662 * @ctx_lock: protects ctx_handles.
 663 * @ctx_handles: idr to hold all ctx handles.
 664 */
 665struct hl_ctx_mgr {
 666        struct mutex            ctx_lock;
 667        struct idr              ctx_handles;
 668};
 669
 670
 671
 672/*
 673 * COMMAND SUBMISSIONS
 674 */
 675
 676/**
 677 * struct hl_userptr - memory mapping chunk information
 678 * @vm_type: type of the VM.
 679 * @job_node: linked-list node for hanging the object on the Job's list.
 680 * @vec: pointer to the frame vector.
 681 * @sgt: pointer to the scatter-gather table that holds the pages.
 682 * @dir: for DMA unmapping, the direction must be supplied, so save it.
 683 * @debugfs_list: node in debugfs list of command submissions.
 684 * @addr: user-space virtual pointer to the start of the memory area.
 685 * @size: size of the memory area to pin & map.
 686 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
 687 */
 688struct hl_userptr {
 689        enum vm_type_t          vm_type; /* must be first */
 690        struct list_head        job_node;
 691        struct frame_vector     *vec;
 692        struct sg_table         *sgt;
 693        enum dma_data_direction dir;
 694        struct list_head        debugfs_list;
 695        u64                     addr;
 696        u32                     size;
 697        u8                      dma_mapped;
 698};
 699
 700/**
 701 * struct hl_cs - command submission.
  702 * @jobs_in_queue_cnt: per-queue counter of submitted jobs.
 703 * @ctx: the context this CS belongs to.
 704 * @job_list: list of the CS's jobs in the various queues.
 705 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
 706 * @refcount: reference counter for usage of the CS.
 707 * @fence: pointer to the fence object of this CS.
 708 * @work_tdr: delayed work node for TDR.
  709 * @mirror_node: node in device mirror list of command submissions.
 710 * @debugfs_list: node in debugfs list of command submissions.
 711 * @sequence: the sequence number of this CS.
 712 * @submitted: true if CS was submitted to H/W.
 713 * @completed: true if CS was completed by device.
  714 * @timedout: true if CS timed out.
 715 * @tdr_active: true if TDR was activated for this CS (to prevent
 716 *              double TDR activation).
 717 * @aborted: true if CS was aborted due to some device error.
 718 */
 719struct hl_cs {
 720        u8                      jobs_in_queue_cnt[HL_MAX_QUEUES];
 721        struct hl_ctx           *ctx;
 722        struct list_head        job_list;
 723        spinlock_t              job_lock;
 724        struct kref             refcount;
 725        struct dma_fence        *fence;
 726        struct delayed_work     work_tdr;
 727        struct list_head        mirror_node;
 728        struct list_head        debugfs_list;
 729        u64                     sequence;
 730        u8                      submitted;
 731        u8                      completed;
 732        u8                      timedout;
 733        u8                      tdr_active;
 734        u8                      aborted;
 735};
 736
 737/**
 738 * struct hl_cs_job - command submission job.
 739 * @cs_node: the node to hang on the CS jobs list.
 740 * @cs: the CS this job belongs to.
 741 * @user_cb: the CB we got from the user.
 742 * @patched_cb: in case of patching, this is internal CB which is submitted on
 743 *              the queue instead of the CB we got from the IOCTL.
 744 * @finish_work: workqueue object to run when job is completed.
 745 * @userptr_list: linked-list of userptr mappings that belong to this job and
 746 *                      wait for completion.
 747 * @debugfs_list: node in debugfs list of command submission jobs.
 748 * @id: the id of this job inside a CS.
 749 * @hw_queue_id: the id of the H/W queue this job is submitted to.
 750 * @user_cb_size: the actual size of the CB we got from the user.
 751 * @job_cb_size: the actual size of the CB that we put on the queue.
 752 * @ext_queue: whether the job is for external queue or internal queue.
 753 */
 754struct hl_cs_job {
 755        struct list_head        cs_node;
 756        struct hl_cs            *cs;
 757        struct hl_cb            *user_cb;
 758        struct hl_cb            *patched_cb;
 759        struct work_struct      finish_work;
 760        struct list_head        userptr_list;
 761        struct list_head        debugfs_list;
 762        u32                     id;
 763        u32                     hw_queue_id;
 764        u32                     user_cb_size;
 765        u32                     job_cb_size;
 766        u8                      ext_queue;
 767};
 768
 769/**
  770 * struct hl_cs_parser - command submission parser properties.
 771 * @user_cb: the CB we got from the user.
 772 * @patched_cb: in case of patching, this is internal CB which is submitted on
 773 *              the queue instead of the CB we got from the IOCTL.
 774 * @job_userptr_list: linked-list of userptr mappings that belong to the related
 775 *                      job and wait for completion.
 776 * @cs_sequence: the sequence number of the related CS.
 777 * @ctx_id: the ID of the context the related CS belongs to.
 778 * @hw_queue_id: the id of the H/W queue this job is submitted to.
 779 * @user_cb_size: the actual size of the CB we got from the user.
 780 * @patched_cb_size: the size of the CB after parsing.
 781 * @ext_queue: whether the job is for external queue or internal queue.
 782 * @job_id: the id of the related job inside the related CS.
 783 */
 784struct hl_cs_parser {
 785        struct hl_cb            *user_cb;
 786        struct hl_cb            *patched_cb;
 787        struct list_head        *job_userptr_list;
 788        u64                     cs_sequence;
 789        u32                     ctx_id;
 790        u32                     hw_queue_id;
 791        u32                     user_cb_size;
 792        u32                     patched_cb_size;
 793        u8                      ext_queue;
 794        u8                      job_id;
 795};
 796
 797
 798/*
 799 * MEMORY STRUCTURE
 800 */
 801
 802/**
 803 * struct hl_vm_hash_node - hash element from virtual address to virtual
  804 *                              memory area descriptor (hl_vm_phys_pg_pack or
 805 *                              hl_userptr).
 806 * @node: node to hang on the hash table in context object.
 807 * @vaddr: key virtual address.
  808 * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
 809 */
 810struct hl_vm_hash_node {
 811        struct hlist_node       node;
 812        u64                     vaddr;
 813        void                    *ptr;
 814};
 815
 816/**
 817 * struct hl_vm_phys_pg_pack - physical page pack.
 818 * @vm_type: describes the type of the virtual area descriptor.
 819 * @pages: the physical page array.
  820 * @npages: number of physical pages in the pack.
 821 * @total_size: total size of all the pages in this list.
 822 * @mapping_cnt: number of shared mappings.
 823 * @asid: the context related to this list.
 824 * @page_size: size of each page in the pack.
 825 * @flags: HL_MEM_* flags related to this list.
 826 * @handle: the provided handle related to this list.
 827 * @offset: offset from the first page.
  828 * @contiguous: whether the physical memory is contiguous.
  829 * @created_from_userptr: whether the pack originated from a host virtual address.
 830 */
 831struct hl_vm_phys_pg_pack {
 832        enum vm_type_t          vm_type; /* must be first */
 833        u64                     *pages;
 834        u64                     npages;
 835        u64                     total_size;
 836        atomic_t                mapping_cnt;
 837        u32                     asid;
 838        u32                     page_size;
 839        u32                     flags;
 840        u32                     handle;
 841        u32                     offset;
 842        u8                      contiguous;
 843        u8                      created_from_userptr;
 844};
 845
 846/**
 847 * struct hl_vm_va_block - virtual range block information.
 848 * @node: node to hang on the virtual range list in context object.
 849 * @start: virtual range start address.
 850 * @end: virtual range end address.
 851 * @size: virtual range size.
 852 */
 853struct hl_vm_va_block {
 854        struct list_head        node;
 855        u64                     start;
 856        u64                     end;
 857        u64                     size;
 858};
 859
 860/**
 861 * struct hl_vm - virtual memory manager for MMU.
 862 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
 863 * @dram_pg_pool_refcount: reference counter for the pool usage.
  864 * @idr_lock: protects phys_pg_pack_handles.
 865 * @phys_pg_pack_handles: idr to hold all device allocations handles.
 866 * @init_done: whether initialization was done. We need this because VM
 867 *              initialization might be skipped during device initialization.
 868 */
 869struct hl_vm {
 870        struct gen_pool         *dram_pg_pool;
 871        struct kref             dram_pg_pool_refcount;
 872        spinlock_t              idr_lock;
 873        struct idr              phys_pg_pack_handles;
 874        u8                      init_done;
 875};
 876
 877
 878/*
 879 * DEBUG, PROFILING STRUCTURE
 880 */
 881
 882/**
 883 * struct hl_debug_params - Coresight debug parameters.
 884 * @input: pointer to component specific input parameters.
 885 * @output: pointer to component specific output parameters.
 886 * @output_size: size of output buffer.
 887 * @reg_idx: relevant register ID.
 888 * @op: component operation to execute.
  889 * @enable: true to enable component debugging, false otherwise.
 890 */
 891struct hl_debug_params {
 892        void *input;
 893        void *output;
 894        u32 output_size;
 895        u32 reg_idx;
 896        u32 op;
 897        bool enable;
 898};
 899
 900/*
 901 * FILE PRIVATE STRUCTURE
 902 */
 903
 904/**
 905 * struct hl_fpriv - process information stored in FD private data.
 906 * @hdev: habanalabs device structure.
 907 * @filp: pointer to the given file structure.
 908 * @taskpid: current process ID.
 909 * @ctx: current executing context.
  910 * @ctx_mgr: context manager to handle multiple contexts for this FD.
 911 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
 912 * @debugfs_list: list of relevant ASIC debugfs.
 913 * @refcount: number of related contexts.
 914 * @restore_phase_mutex: lock for context switch and restore phase.
 915 */
 916struct hl_fpriv {
 917        struct hl_device        *hdev;
 918        struct file             *filp;
 919        struct pid              *taskpid;
 920        struct hl_ctx           *ctx; /* TODO: remove for multiple ctx */
 921        struct hl_ctx_mgr       ctx_mgr;
 922        struct hl_cb_mgr        cb_mgr;
 923        struct list_head        debugfs_list;
 924        struct kref             refcount;
 925        struct mutex            restore_phase_mutex;
 926};
 927
 928
 929/*
 930 * DebugFS
 931 */
 932
 933/**
 934 * struct hl_info_list - debugfs file ops.
 935 * @name: file name.
 936 * @show: function to output information.
 937 * @write: function to write to the file.
 938 */
 939struct hl_info_list {
 940        const char      *name;
 941        int             (*show)(struct seq_file *s, void *data);
 942        ssize_t         (*write)(struct file *file, const char __user *buf,
 943                                size_t count, loff_t *f_pos);
 944};
 945
 946/**
 947 * struct hl_debugfs_entry - debugfs dentry wrapper.
 948 * @dent: base debugfs entry structure.
  949 * @info_ent: dentry related ops.
 950 * @dev_entry: ASIC specific debugfs manager.
 951 */
 952struct hl_debugfs_entry {
 953        struct dentry                   *dent;
 954        const struct hl_info_list       *info_ent;
 955        struct hl_dbg_device_entry      *dev_entry;
 956};
 957
 958/**
 959 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
 960 * @root: root dentry.
 961 * @hdev: habanalabs device structure.
 962 * @entry_arr: array of available hl_debugfs_entry.
 963 * @file_list: list of available debugfs files.
 964 * @file_mutex: protects file_list.
 965 * @cb_list: list of available CBs.
 966 * @cb_spinlock: protects cb_list.
 967 * @cs_list: list of available CSs.
 968 * @cs_spinlock: protects cs_list.
  969 * @cs_job_list: list of available CS jobs.
 970 * @cs_job_spinlock: protects cs_job_list.
 971 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
 972 * @userptr_spinlock: protects userptr_list.
 973 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
  974 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
 975 * @addr: next address to read/write from/to in read/write32.
 976 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
 977 * @mmu_asid: ASID to use while translating in mmu_show.
  978 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
  979 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
  980 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
 981 */
 982struct hl_dbg_device_entry {
 983        struct dentry                   *root;
 984        struct hl_device                *hdev;
 985        struct hl_debugfs_entry         *entry_arr;
 986        struct list_head                file_list;
 987        struct mutex                    file_mutex;
 988        struct list_head                cb_list;
 989        spinlock_t                      cb_spinlock;
 990        struct list_head                cs_list;
 991        spinlock_t                      cs_spinlock;
 992        struct list_head                cs_job_list;
 993        spinlock_t                      cs_job_spinlock;
 994        struct list_head                userptr_list;
 995        spinlock_t                      userptr_spinlock;
 996        struct list_head                ctx_mem_hash_list;
 997        spinlock_t                      ctx_mem_hash_spinlock;
 998        u64                             addr;
 999        u64                             mmu_addr;
1000        u32                             mmu_asid;
1001        u8                              i2c_bus;
1002        u8                              i2c_addr;
1003        u8                              i2c_reg;
1004};
1005
1006
1007/*
1008 * DEVICES
1009 */
1010
1011/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
 1012 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
1013 */
1014#define HL_MAX_MINORS   256
1015
1016/*
1017 * Registers read & write functions.
1018 */
1019
1020u32 hl_rreg(struct hl_device *hdev, u32 reg);
1021void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1022
1023#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
1024#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
1025#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",    \
1026                        hdev->asic_funcs->rreg(hdev, (reg)))
1027
1028#define WREG32_P(reg, val, mask)                                \
1029        do {                                                    \
1030                u32 tmp_ = RREG32(reg);                         \
1031                tmp_ &= (mask);                                 \
1032                tmp_ |= ((val) & ~(mask));                      \
1033                WREG32(reg, tmp_);                              \
1034        } while (0)
1035#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1036#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
1037
1038#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
1039#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
1040#define WREG32_FIELD(reg, field, val)   \
1041        WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
1042                        (val) << REG_FIELD_SHIFT(reg, field))
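/*
 * Example (illustrative; DMA_QM_0_GLBL_CFG0 and its ENABLE field are
 * hypothetical register/field names): WREG32_FIELD() performs a
 * read-modify-write of a single field, relying on the generated per-field
 * _SHIFT and _MASK definitions:
 *
 *	WREG32_FIELD(DMA_QM_0_GLBL_CFG0, ENABLE, 1);
 *
 * expands to a RREG32()/WREG32() pair that clears
 * DMA_QM_0_GLBL_CFG0_ENABLE_MASK and ORs in the shifted value.
 */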
1043
1044#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1045({ \
1046        ktime_t __timeout; \
1047        /* timeout should be longer when working with simulator */ \
1048        if (hdev->pdev) \
1049                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1050        else \
1051                __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
1052        might_sleep_if(sleep_us); \
1053        for (;;) { \
1054                (val) = RREG32(addr); \
1055                if (cond) \
1056                        break; \
1057                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1058                        (val) = RREG32(addr); \
1059                        break; \
1060                } \
1061                if (sleep_us) \
1062                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1063        } \
1064        (cond) ? 0 : -ETIMEDOUT; \
1065})
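/*
 * Example (illustrative; the register name and ready bit are hypothetical):
 * poll a status register every 100 us until bit 0 is set, giving up after
 * HL_DEVICE_TIMEOUT_USEC:
 *
 *	u32 status;
 *	int rc;
 *
 *	rc = hl_poll_timeout(hdev, mmCPU_BOOT_STATUS, status,
 *				(status & 0x1), 100, HL_DEVICE_TIMEOUT_USEC);
 *	if (rc)
 *		dev_err(hdev->dev, "device not ready, status 0x%x\n", status);
 */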
1066
1067/*
 1068 * The address in this macro always points to a memory location in the
1069 * host's (server's) memory. That location is updated asynchronously
1070 * either by the direct access of the device or by another core.
1071 *
1072 * To work both in LE and BE architectures, we need to distinguish between the
1073 * two states (device or another core updates the memory location). Therefore,
1074 * if mem_written_by_device is true, the host memory being polled will be
1075 * updated directly by the device. If false, the host memory being polled will
1076 * be updated by host CPU. Required so host knows whether or not the memory
1077 * might need to be byte-swapped before returning value to caller.
1078 */
1079#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
1080                                mem_written_by_device) \
1081({ \
1082        ktime_t __timeout; \
1083        /* timeout should be longer when working with simulator */ \
1084        if (hdev->pdev) \
1085                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1086        else \
1087                __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
1088        might_sleep_if(sleep_us); \
1089        for (;;) { \
1090                /* Verify we read updates done by other cores or by device */ \
1091                mb(); \
1092                (val) = *((u32 *) (uintptr_t) (addr)); \
1093                if (mem_written_by_device) \
1094                        (val) = le32_to_cpu(val); \
1095                if (cond) \
1096                        break; \
1097                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1098                        (val) = *((u32 *) (uintptr_t) (addr)); \
1099                        if (mem_written_by_device) \
1100                                (val) = le32_to_cpu(val); \
1101                        break; \
1102                } \
1103                if (sleep_us) \
1104                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1105        } \
1106        (cond) ? 0 : -ETIMEDOUT; \
1107})
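/*
 * Example (illustrative sketch; completion_va is a hypothetical pointer to
 * a host buffer): poll a completion word that the device DMA-writes into
 * host memory. Since the device wrote it, the value is little-endian and
 * mem_written_by_device must be true:
 *
 *	u32 done;
 *	int rc;
 *
 *	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) completion_va,
 *				done, (done == 1), 1000, 1000000, true);
 */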
1108
1109#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
1110                                        timeout_us) \
1111({ \
1112        ktime_t __timeout; \
1113        /* timeout should be longer when working with simulator */ \
1114        if (hdev->pdev) \
1115                __timeout = ktime_add_us(ktime_get(), timeout_us); \
1116        else \
1117                __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
1118        might_sleep_if(sleep_us); \
1119        for (;;) { \
1120                (val) = readl(addr); \
1121                if (cond) \
1122                        break; \
1123                if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1124                        (val) = readl(addr); \
1125                        break; \
1126                } \
1127                if (sleep_us) \
1128                        usleep_range((sleep_us >> 2) + 1, sleep_us); \
1129        } \
1130        (cond) ? 0 : -ETIMEDOUT; \
1131})
1132
1133struct hwmon_chip_info;
1134
1135/**
1136 * struct hl_device_reset_work - reset workqueue task wrapper.
1137 * @reset_work: reset work to be done.
1138 * @hdev: habanalabs device structure.
1139 */
1140struct hl_device_reset_work {
1141        struct work_struct              reset_work;
1142        struct hl_device                *hdev;
1143};
1144
1145/**
1146 * struct hl_device - habanalabs device structure.
1147 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
1148 * @pcie_bar: array of available PCIe bars.
1149 * @rmmio: configuration area address on SRAM.
1150 * @cdev: related char device.
 1151 * @dev: related kernel basic device structure.
1152 * @work_freq: delayed work to lower device frequency if possible.
1153 * @work_heartbeat: delayed work for ArmCP is-alive check.
 1154 * @asic_name: ASIC specific name.
1155 * @asic_type: ASIC specific type.
1156 * @completion_queue: array of hl_cq.
1157 * @cq_wq: work queue of completion queues for executing work in process context
1158 * @eq_wq: work queue of event queue for executing work in process context.
1159 * @kernel_ctx: KMD context structure.
1160 * @kernel_queues: array of hl_hw_queue.
1161 * @hw_queues_mirror_list: CS mirror list for TDR.
1162 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
 1163 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
1164 * @event_queue: event queue for IRQ from ArmCP.
1165 * @dma_pool: DMA pool for small allocations.
1166 * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
1167 * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
1168 * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
1169 * @asid_bitmap: holds used/available ASIDs.
1170 * @asid_mutex: protects asid_bitmap.
1171 * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
1172 *                    fd_open_cnt is atomic, we need this lock to serialize
1173 *                    the open function because the driver currently supports
1174 *                    only a single process at a time. In addition, we need a
1175 *                    lock here so we can flush user processes which are opening
1176 *                    the device while we are trying to hard reset it
1177 * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
1178 * @debug_lock: protects critical section of setting debug mode for device
1179 * @asic_prop: ASIC specific immutable properties.
1180 * @asic_funcs: ASIC specific functions.
1181 * @asic_specific: ASIC specific information to use only from ASIC files.
1182 * @mmu_pgt_pool: pool of available MMU hops.
1183 * @vm: virtual memory manager for MMU.
1184 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
1185 * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
1186 * @hwmon_dev: H/W monitor device.
1187 * @pm_mng_profile: current power management profile.
1188 * @hl_chip_info: ASIC's sensors information.
1189 * @hl_debugfs: device's debugfs manager.
1190 * @cb_pool: list of preallocated CBs.
1191 * @cb_pool_lock: protects the CB pool.
1192 * @user_ctx: current user context executing.
1193 * @dram_used_mem: current DRAM memory consumption.
1194 * @timeout_jiffies: device CS timeout value.
 1195 * @max_power: the max power of the device, as configured by the sysadmin. This
 1196 *             value is saved so that in case of hard-reset, KMD will restore
 1197 *             this value and update the F/W after the re-initialization.
1198 * @in_reset: is device in reset flow.
1199 * @curr_pll_profile: current PLL profile.
1200 * @fd_open_cnt: number of open user processes.
1201 * @cs_active_cnt: number of active command submissions on this device (active
1202 *                 means already in H/W queues)
1203 * @major: habanalabs KMD major.
1204 * @high_pll: high PLL profile frequency.
 1205 * @soft_reset_cnt: number of soft resets since KMD loading.
 1206 * @hard_reset_cnt: number of hard resets since KMD loading.
1207 * @id: device minor.
1208 * @disabled: is device disabled.
 1209 * @late_init_done: whether the late init stage was done during initialization.
 1210 * @hwmon_initialized: whether the H/W monitor sensors were initialized.
 1211 * @hard_reset_pending: whether a hard reset work item is pending.
1212 * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
1213 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
1214 *                   otherwise.
1215 * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
1216 * @dram_default_page_mapping: is DRAM default page mapping enabled.
1217 * @init_done: is the initialization of the device done.
1218 * @mmu_enable: is MMU enabled.
1219 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
 1220 * @dma_mask: the DMA mask that was set for this device.
1221 * @in_debug: is device under debug. This, together with fd_open_cnt, enforces
1222 *            that only a single user is configuring the debug infrastructure.
1223 */
1224struct hl_device {
1225        struct pci_dev                  *pdev;
1226        void __iomem                    *pcie_bar[6];
1227        void __iomem                    *rmmio;
1228        struct cdev                     cdev;
1229        struct device                   *dev;
1230        struct delayed_work             work_freq;
1231        struct delayed_work             work_heartbeat;
1232        char                            asic_name[16];
1233        enum hl_asic_type               asic_type;
1234        struct hl_cq                    *completion_queue;
1235        struct workqueue_struct         *cq_wq;
1236        struct workqueue_struct         *eq_wq;
1237        struct hl_ctx                   *kernel_ctx;
1238        struct hl_hw_queue              *kernel_queues;
1239        struct list_head                hw_queues_mirror_list;
1240        spinlock_t                      hw_queues_mirror_lock;
1241        struct hl_cb_mgr                kernel_cb_mgr;
1242        struct hl_eq                    event_queue;
1243        struct dma_pool                 *dma_pool;
1244        void                            *cpu_accessible_dma_mem;
1245        dma_addr_t                      cpu_accessible_dma_address;
1246        struct gen_pool                 *cpu_accessible_dma_pool;
1247        unsigned long                   *asid_bitmap;
1248        struct mutex                    asid_mutex;
1249        /* TODO: remove fd_open_cnt_lock for multiple process support */
1250        struct mutex                    fd_open_cnt_lock;
1251        struct mutex                    send_cpu_message_lock;
1252        struct mutex                    debug_lock;
1253        struct asic_fixed_properties    asic_prop;
1254        const struct hl_asic_funcs      *asic_funcs;
1255        void                            *asic_specific;
1256        struct gen_pool                 *mmu_pgt_pool;
1257        struct hl_vm                    vm;
1258        struct mutex                    mmu_cache_lock;
1259        void                            *mmu_shadow_hop0;
1260        struct device                   *hwmon_dev;
1261        enum hl_pm_mng_profile          pm_mng_profile;
1262        struct hwmon_chip_info          *hl_chip_info;
1263
1264        struct hl_dbg_device_entry      hl_debugfs;
1265
1266        struct list_head                cb_pool;
1267        spinlock_t                      cb_pool_lock;
1268
1269        /* TODO: remove user_ctx for multiple process support */
1270        struct hl_ctx                   *user_ctx;
1271
1272        atomic64_t                      dram_used_mem;
1273        u64                             timeout_jiffies;
1274        u64                             max_power;
1275        atomic_t                        in_reset;
1276        atomic_t                        curr_pll_profile;
1277        atomic_t                        fd_open_cnt;
1278        atomic_t                        cs_active_cnt;
1279        u32                             major;
1280        u32                             high_pll;
1281        u32                             soft_reset_cnt;
1282        u32                             hard_reset_cnt;
1283        u16                             id;
1284        u8                              disabled;
1285        u8                              late_init_done;
1286        u8                              hwmon_initialized;
1287        u8                              hard_reset_pending;
1288        u8                              heartbeat;
1289        u8                              reset_on_lockup;
1290        u8                              dram_supports_virtual_memory;
1291        u8                              dram_default_page_mapping;
1292        u8                              init_done;
1293        u8                              device_cpu_disabled;
1294        u8                              dma_mask;
1295        u8                              in_debug;
1296
1297        /* Parameters for bring-up */
1298        u8                              mmu_enable;
1299        u8                              cpu_enable;
1300        u8                              reset_pcilink;
1301        u8                              cpu_queues_enable;
1302        u8                              fw_loading;
1303        u8                              pldm;
1304};
1305
1306
1307/*
1308 * IOCTLs
1309 */
1310
1311/**
1312 * typedef hl_ioctl_t - typedef for ioctl function in the driver
1313 * @hpriv: pointer to the FD's private data, which contains the state of the
1314 *              user process.
1315 * @data: pointer to the input/output arguments structure of the IOCTL
1316 *
1317 * Return: 0 for success, negative value for error
1318 */
1319typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
1320
1321/**
1322 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
1323 * @cmd: the IOCTL code as created by the kernel macros.
1324 * @func: pointer to the driver's function that should be called for this IOCTL.
1325 */
1326struct hl_ioctl_desc {
1327        unsigned int cmd;
1328        hl_ioctl_t *func;
1329};
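
/*
 * Example (illustrative sketch, not part of this header): a source file can
 * build its IOCTL dispatch table from this descriptor, indexed by the command
 * number. The HL_IOCTL_CB/HL_IOCTL_CS codes are assumed to come from the uapi
 * header and are shown here only to demonstrate the layout:
 *
 *	static const struct hl_ioctl_desc hl_ioctls[] = {
 *		[_IOC_NR(HL_IOCTL_CB)] = {.cmd = HL_IOCTL_CB, .func = hl_cb_ioctl},
 *		[_IOC_NR(HL_IOCTL_CS)] = {.cmd = HL_IOCTL_CS, .func = hl_cs_ioctl},
 *	};
 */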
1330
1331
1332/*
1333 * Kernel module functions that can be accessed by entire module
1334 */
1335
1336/**
1337 * hl_mem_area_inside_range() - Checks whether address+size is inside a range.
1338 * @address: The start address of the area we want to validate.
1339 * @size: The size in bytes of the area we want to validate.
1340 * @range_start_address: The start address of the valid range.
1341 * @range_end_address: The end address of the valid range.
1342 *
1343 * Return: true if the area is inside the valid range, false otherwise.
1344 */
1345static inline bool hl_mem_area_inside_range(u64 address, u32 size,
1346                                u64 range_start_address, u64 range_end_address)
1347{
1348        u64 end_address = address + size;
1349
1350        if ((address >= range_start_address) &&
1351                        (end_address <= range_end_address) &&
1352                        (end_address > address))
1353                return true;
1354
1355        return false;
1356}
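
/*
 * Usage sketch (illustrative only): a caller validating that a user-supplied
 * area lies entirely inside, e.g., the device SRAM window could do the
 * following, where sram_start/sram_end are hypothetical bounds of that window:
 *
 *	if (!hl_mem_area_inside_range(addr, size, sram_start, sram_end))
 *		return -EINVAL;
 */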
1357
1358/**
1359 * hl_mem_area_crosses_range() - Checks whether address+size crosses a range.
1360 * @address: The start address of the area we want to validate.
1361 * @size: The size in bytes of the area we want to validate.
1362 * @range_start_address: The start address of the valid range.
1363 * @range_end_address: The end address of the valid range.
1364 *
1365 * Return: true if the area overlaps part or all of the valid range,
1366 *              false otherwise.
1367 */
1368static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1369                                u64 range_start_address, u64 range_end_address)
1370{
1371        u64 end_address = address + size;
1372
1373        if ((address >= range_start_address) &&
1374                        (address < range_end_address))
1375                return true;
1376
1377        if ((end_address >= range_start_address) &&
1378                        (end_address < range_end_address))
1379                return true;
1380
1381        if ((address < range_start_address) &&
1382                        (end_address >= range_end_address))
1383                return true;
1384
1385        return false;
1386}
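
/*
 * Usage sketch (illustrative only): the complementary helper is handy for
 * rejecting an area that overlaps a reserved region, where
 * reserved_start/reserved_end are hypothetical bounds of that region:
 *
 *	if (hl_mem_area_crosses_range(addr, size, reserved_start, reserved_end))
 *		return -EINVAL;
 */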
1387
1388int hl_device_open(struct inode *inode, struct file *filp);
1389bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1390enum hl_device_status hl_device_status(struct hl_device *hdev);
1391int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
1392int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
1393                enum hl_asic_type asic_type, int minor);
1394void destroy_hdev(struct hl_device *hdev);
1395int hl_hw_queues_create(struct hl_device *hdev);
1396void hl_hw_queues_destroy(struct hl_device *hdev);
1397int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
1398                                u32 cb_size, u64 cb_ptr);
1399int hl_hw_queue_schedule_cs(struct hl_cs *cs);
1400u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
1401void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
1402void hl_int_hw_queue_update_ci(struct hl_cs *cs);
1403void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
1404
1405#define hl_queue_inc_ptr(p)             hl_hw_queue_add_ptr(p, 1)
1406#define hl_pi_2_offset(pi)              ((pi) & (HL_QUEUE_LENGTH - 1))
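
/*
 * Example (pure arithmetic, assuming the illustrative value
 * HL_QUEUE_LENGTH == 256): hl_pi_2_offset(0x101) == 0x1, i.e. the macro masks
 * the pointer with HL_QUEUE_LENGTH - 1 to obtain an offset inside the queue.
 */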
1407
1408int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
1409void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
1410int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
1411void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
1412void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
1413void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
1414irqreturn_t hl_irq_handler_cq(int irq, void *arg);
1415irqreturn_t hl_irq_handler_eq(int irq, void *arg);
1416u32 hl_cq_inc_ptr(u32 ptr);
1417
1418int hl_asid_init(struct hl_device *hdev);
1419void hl_asid_fini(struct hl_device *hdev);
1420unsigned long hl_asid_alloc(struct hl_device *hdev);
1421void hl_asid_free(struct hl_device *hdev, unsigned long asid);
1422
1423int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
1424void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
1425int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
1426void hl_ctx_do_release(struct kref *ref);
1427void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
1428int hl_ctx_put(struct hl_ctx *ctx);
1429struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
1430void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
1431void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
1432
1433int hl_device_init(struct hl_device *hdev, struct class *hclass);
1434void hl_device_fini(struct hl_device *hdev);
1435int hl_device_suspend(struct hl_device *hdev);
1436int hl_device_resume(struct hl_device *hdev);
1437int hl_device_reset(struct hl_device *hdev, bool hard_reset,
1438                        bool from_hard_reset_thread);
1439void hl_hpriv_get(struct hl_fpriv *hpriv);
1440void hl_hpriv_put(struct hl_fpriv *hpriv);
1441int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
1442
1443int hl_build_hwmon_channel_info(struct hl_device *hdev,
1444                struct armcp_sensor *sensors_arr);
1445
1446int hl_sysfs_init(struct hl_device *hdev);
1447void hl_sysfs_fini(struct hl_device *hdev);
1448
1449int hl_hwmon_init(struct hl_device *hdev);
1450void hl_hwmon_fini(struct hl_device *hdev);
1451
1452int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
1453                u64 *handle, int ctx_id);
1454int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
1455int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
1456struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
1457                        u32 handle);
1458void hl_cb_put(struct hl_cb *cb);
1459void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
1460void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
1461struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
1462int hl_cb_pool_init(struct hl_device *hdev);
1463int hl_cb_pool_fini(struct hl_device *hdev);
1464
1465void hl_cs_rollback_all(struct hl_device *hdev);
1466struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
1467
1468void goya_set_asic_funcs(struct hl_device *hdev);
1469
1470int hl_vm_ctx_init(struct hl_ctx *ctx);
1471void hl_vm_ctx_fini(struct hl_ctx *ctx);
1472
1473int hl_vm_init(struct hl_device *hdev);
1474void hl_vm_fini(struct hl_device *hdev);
1475
1476int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1477                        struct hl_userptr *userptr);
1478int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
1479void hl_userptr_delete_list(struct hl_device *hdev,
1480                                struct list_head *userptr_list);
1481bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
1482                                struct list_head *userptr_list,
1483                                struct hl_userptr **userptr);
1484
1485int hl_mmu_init(struct hl_device *hdev);
1486void hl_mmu_fini(struct hl_device *hdev);
1487int hl_mmu_ctx_init(struct hl_ctx *ctx);
1488void hl_mmu_ctx_fini(struct hl_ctx *ctx);
1489int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
1490int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
1491void hl_mmu_swap_out(struct hl_ctx *ctx);
1492void hl_mmu_swap_in(struct hl_ctx *ctx);
1493
1494int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
1495                                void __iomem *dst);
1496int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
1497int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
1498                                u16 len, u32 timeout, long *result);
1499int hl_fw_test_cpu_queue(struct hl_device *hdev);
1500void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
1501                                                dma_addr_t *dma_handle);
1502void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
1503                                        void *vaddr);
1504int hl_fw_send_heartbeat(struct hl_device *hdev);
1505int hl_fw_armcp_info_get(struct hl_device *hdev);
1506int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
1507
1508int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
1509                        bool is_wc[3]);
1510int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
1511int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
1512                                u64 addr);
1513int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
1514                        u64 dram_base_address, u64 host_phys_base_address,
1515                        u64 host_phys_size);
1516int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
1517void hl_pci_fini(struct hl_device *hdev);
1518int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
1519
1520long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
1521void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
1522long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
1523long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
1524long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
1525long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
1526long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
1527void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
1528                        long value);
1529u64 hl_get_max_power(struct hl_device *hdev);
1530void hl_set_max_power(struct hl_device *hdev, u64 value);
1531
1532#ifdef CONFIG_DEBUG_FS
1533
1534void hl_debugfs_init(void);
1535void hl_debugfs_fini(void);
1536void hl_debugfs_add_device(struct hl_device *hdev);
1537void hl_debugfs_remove_device(struct hl_device *hdev);
1538void hl_debugfs_add_file(struct hl_fpriv *hpriv);
1539void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
1540void hl_debugfs_add_cb(struct hl_cb *cb);
1541void hl_debugfs_remove_cb(struct hl_cb *cb);
1542void hl_debugfs_add_cs(struct hl_cs *cs);
1543void hl_debugfs_remove_cs(struct hl_cs *cs);
1544void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
1545void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
1546void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
1547void hl_debugfs_remove_userptr(struct hl_device *hdev,
1548                                struct hl_userptr *userptr);
1549void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1550void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1551
1552#else
1553
1554static inline void hl_debugfs_init(void)
1555{
1556}
1557
1558static inline void hl_debugfs_fini(void)
1559{
1560}
1561
1562static inline void hl_debugfs_add_device(struct hl_device *hdev)
1563{
1564}
1565
1566static inline void hl_debugfs_remove_device(struct hl_device *hdev)
1567{
1568}
1569
1570static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
1571{
1572}
1573
1574static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
1575{
1576}
1577
1578static inline void hl_debugfs_add_cb(struct hl_cb *cb)
1579{
1580}
1581
1582static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
1583{
1584}
1585
1586static inline void hl_debugfs_add_cs(struct hl_cs *cs)
1587{
1588}
1589
1590static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
1591{
1592}
1593
1594static inline void hl_debugfs_add_job(struct hl_device *hdev,
1595                                        struct hl_cs_job *job)
1596{
1597}
1598
1599static inline void hl_debugfs_remove_job(struct hl_device *hdev,
1600                                        struct hl_cs_job *job)
1601{
1602}
1603
1604static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
1605                                        struct hl_userptr *userptr)
1606{
1607}
1608
1609static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
1610                                        struct hl_userptr *userptr)
1611{
1612}
1613
1614static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
1615                                        struct hl_ctx *ctx)
1616{
1617}
1618
1619static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
1620                                        struct hl_ctx *ctx)
1621{
1622}
1623
1624#endif
1625
1626/* IOCTLs */
1627long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
1628int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
1629int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
1630int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
1631int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
1632
1633#endif /* HABANALABSP_H_ */
1634