linux/drivers/misc/habanalabs/common/debugfs.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

#define MMU_ADDR_BUF_SIZE       40
#define MMU_ASID_BUF_SIZE       10
#define MMU_KBUF_SIZE           (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)

static struct dentry *hl_debug_root;

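/*
 * The helpers below service the I2C and LED debugfs nodes by sending CPU-CP
 * packets to the device CPU. They bail out early when the device is disabled
 * or in reset, since the firmware queue cannot be used in those states.
 */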
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
                                u8 i2c_reg, long *val)
{
        struct cpucp_packet pkt;
        int rc;

        if (hl_device_disabled_or_in_reset(hdev))
                return -EBUSY;

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.i2c_bus = i2c_bus;
        pkt.i2c_addr = i2c_addr;
        pkt.i2c_reg = i2c_reg;

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, val);

        if (rc)
                dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

        return rc;
}

static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
                                u8 i2c_reg, u32 val)
{
        struct cpucp_packet pkt;
        int rc;

        if (hl_device_disabled_or_in_reset(hdev))
                return -EBUSY;

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.i2c_bus = i2c_bus;
        pkt.i2c_addr = i2c_addr;
        pkt.i2c_reg = i2c_reg;
        pkt.value = cpu_to_le64(val);

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, NULL);

        if (rc)
                dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

        return rc;
}

static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
        struct cpucp_packet pkt;
        int rc;

        if (hl_device_disabled_or_in_reset(hdev))
                return;

        memset(&pkt, 0, sizeof(pkt));

        pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
                                CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.led_index = cpu_to_le32(led);
        pkt.value = cpu_to_le64(state);

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
                                                0, NULL);

        if (rc)
                dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}

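/*
 * The *_show() handlers below back the read-only informational nodes that
 * are registered via hl_debugfs_list at the bottom of this file. Each one
 * walks the relevant debugfs tracking list under its lock and prints a table.
 */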
static int command_buffers_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cb *cb;
        bool first = true;

        spin_lock(&dev_entry->cb_spinlock);

        list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " CB ID   CTX ID   CB size    CB RefCnt    mmap?   CS counter\n");
                        seq_puts(s, "---------------------------------------------------------------\n");
                }
                seq_printf(s,
                        "   %03llu        %d    0x%08x      %d          %d          %d\n",
                        cb->id, cb->ctx->asid, cb->size,
                        kref_read(&cb->refcount),
                        cb->mmap, cb->cs_cnt);
        }

        spin_unlock(&dev_entry->cb_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int command_submission_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cs *cs;
        bool first = true;

        spin_lock(&dev_entry->cs_spinlock);

        list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " CS ID   CTX ASID   CS RefCnt   Submitted    Completed\n");
                        seq_puts(s, "------------------------------------------------------\n");
                }
                seq_printf(s,
                        "   %llu       %d          %d           %d            %d\n",
                        cs->sequence, cs->ctx->asid,
                        kref_read(&cs->refcount),
                        cs->submitted, cs->completed);
        }

        spin_unlock(&dev_entry->cs_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int command_submission_jobs_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_cs_job *job;
        bool first = true;

        spin_lock(&dev_entry->cs_job_spinlock);

        list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " JOB ID   CS ID    CTX ASID   H/W Queue\n");
                        seq_puts(s, "---------------------------------------\n");
                }
                if (job->cs)
                        seq_printf(s,
                                "    %02d       %llu         %d         %d\n",
                                job->id, job->cs->sequence, job->cs->ctx->asid,
                                job->hw_queue_id);
                else
                        seq_printf(s,
                                "    %02d       0         %d         %d\n",
                                job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
        }

        spin_unlock(&dev_entry->cs_job_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int userptr_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_userptr *userptr;
        char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                "DMA_FROM_DEVICE", "DMA_NONE"};
        bool first = true;

        spin_lock(&dev_entry->userptr_spinlock);

        list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
                if (first) {
                        first = false;
                        seq_puts(s, "\n");
                        seq_puts(s, " user virtual address     size             dma dir\n");
                        seq_puts(s, "----------------------------------------------------------\n");
                }
                seq_printf(s,
                        "    0x%-14llx      %-10u    %-30s\n",
                        userptr->addr, userptr->size, dma_dir[userptr->dir]);
        }

        spin_unlock(&dev_entry->userptr_spinlock);

        if (!first)
                seq_puts(s, "\n");

        return 0;
}

static int vm_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_ctx *ctx;
        struct hl_vm *vm;
        struct hl_vm_hash_node *hnode;
        struct hl_userptr *userptr;
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        enum vm_type_t *vm_type;
        bool once = true;
        u64 j;
        int i;

        if (!dev_entry->hdev->mmu_enable)
                return 0;

        spin_lock(&dev_entry->ctx_mem_hash_spinlock);

        list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
                once = false;
                seq_puts(s, "\n\n----------------------------------------------------");
                seq_puts(s, "\n----------------------------------------------------\n\n");
                seq_printf(s, "ctx asid: %u\n", ctx->asid);

                seq_puts(s, "\nmappings:\n\n");
                seq_puts(s, "    virtual address        size          handle\n");
                seq_puts(s, "----------------------------------------------------\n");
                mutex_lock(&ctx->mem_hash_lock);
                hash_for_each(ctx->mem_hash, i, hnode, node) {
                        vm_type = hnode->ptr;

                        if (*vm_type == VM_TYPE_USERPTR) {
                                userptr = hnode->ptr;
                                seq_printf(s,
                                        "    0x%-14llx      %-10u\n",
                                        hnode->vaddr, userptr->size);
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
                                        "    0x%-14llx      %-10llu       %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
                }
                mutex_unlock(&ctx->mem_hash_lock);

                vm = &ctx->hdev->vm;
                spin_lock(&vm->idr_lock);

                if (!idr_is_empty(&vm->phys_pg_pack_handles))
                        seq_puts(s, "\n\nallocations:\n");

                idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
                        if (phys_pg_pack->asid != ctx->asid)
                                continue;

                        seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
                        seq_printf(s, "page size: %u\n\n",
                                                phys_pg_pack->page_size);
                        seq_puts(s, "   physical address\n");
                        seq_puts(s, "---------------------\n");
                        for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, "    0x%-14llx\n",
                                                phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);

        }

        spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

        if (!once)
                seq_puts(s, "\n");

        return 0;
}

/* these inline functions are copied from mmu.c */
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
        return ctx->hdev->asic_prop.mmu_pgt_addr +
                        (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
                                        u64 virt_addr, u64 mask, u64 shift)
{
        return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
                        ((virt_addr & mask) >> shift);
}

static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
                                        mmu_specs->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
                                        mmu_specs->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
                                        mmu_specs->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
                                        mmu_specs->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
                                        mmu_specs->hop4_shift);
}

static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx,
                                        struct hl_mmu_properties *mmu_specs,
                                        u64 hop_addr, u64 vaddr)
{
        return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask,
                                        mmu_specs->hop5_shift);
}

static inline u64 get_next_hop_addr(u64 curr_pte)
{
        if (curr_pte & PAGE_PRESENT_MASK)
                return curr_pte & HOP_PHYS_ADDR_MASK;
        else
                return ULLONG_MAX;
}

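/*
 * Walk the device page tables in software for the <asid, virt_addr> pair
 * that was written to the "mmu" node (see mmu_asid_va_write() below) and
 * print the address and PTE value of every hop, or report that the virtual
 * address is not mapped.
 */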
static int mmu_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        struct hl_ctx *ctx;
        bool is_dram_addr;

        u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
                hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
                hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
                hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
                hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
                hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0,
                virt_addr = dev_entry->mmu_addr;

        if (!hdev->mmu_enable)
                return 0;

        if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
                ctx = hdev->kernel_ctx;
        else
                ctx = hdev->compute_ctx;

        if (!ctx) {
                dev_err(hdev->dev, "no ctx available\n");
                return 0;
        }

        is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
                                                prop->dmmu.start_addr,
                                                prop->dmmu.end_addr);

        /* shifts and masks are the same in PMMU and HPMMU, use one of them */
        mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

        mutex_lock(&ctx->mmu_lock);

        /* the following lookup is copied from unmap() in mmu.c */

        hop0_addr = get_hop0_addr(ctx);
        hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
        hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
        hop1_addr = get_next_hop_addr(hop0_pte);

        if (hop1_addr == ULLONG_MAX)
                goto not_mapped;

        hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
        hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
        hop2_addr = get_next_hop_addr(hop1_pte);

        if (hop2_addr == ULLONG_MAX)
                goto not_mapped;

        hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
        hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
        hop3_addr = get_next_hop_addr(hop2_pte);

        if (hop3_addr == ULLONG_MAX)
                goto not_mapped;

        hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
        hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);

        if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
                if (!(hop3_pte & LAST_MASK)) {
                        hop4_addr = get_next_hop_addr(hop3_pte);

                        if (hop4_addr == ULLONG_MAX)
                                goto not_mapped;

                        hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
                                                        hop4_addr, virt_addr);
                        hop4_pte = hdev->asic_funcs->read_pte(hdev,
                                                                hop4_pte_addr);
                        if (!(hop4_pte & PAGE_PRESENT_MASK))
                                goto not_mapped;
                } else {
                        if (!(hop3_pte & PAGE_PRESENT_MASK))
                                goto not_mapped;
                }
        } else {
                hop4_addr = get_next_hop_addr(hop3_pte);

                if (hop4_addr == ULLONG_MAX)
                        goto not_mapped;

                hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
                                                hop4_addr, virt_addr);
                hop4_pte = hdev->asic_funcs->read_pte(hdev,
                                                        hop4_pte_addr);
                if (!(hop4_pte & LAST_MASK)) {
                        hop5_addr = get_next_hop_addr(hop4_pte);

                        if (hop5_addr == ULLONG_MAX)
                                goto not_mapped;

                        hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop,
                                                        hop5_addr, virt_addr);
                        hop5_pte = hdev->asic_funcs->read_pte(hdev,
                                                                hop5_pte_addr);
                        if (!(hop5_pte & PAGE_PRESENT_MASK))
                                goto not_mapped;
                } else {
                        if (!(hop4_pte & PAGE_PRESENT_MASK))
                                goto not_mapped;
                }
        }

        seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
                        dev_entry->mmu_asid, dev_entry->mmu_addr);

        seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
        seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
        seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);

        seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
        seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
        seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);

        seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
        seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
        seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);

        seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
        seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
        seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);

        if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
                if (!(hop3_pte & LAST_MASK)) {
                        seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
                        seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
                        seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
                }
        } else {
                seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
                seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
                seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);

                if (!(hop4_pte & LAST_MASK)) {
                        seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr);
                        seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr);
                        seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte);
                }
        }

        goto out;

not_mapped:
        dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                        virt_addr);
out:
        mutex_unlock(&ctx->mmu_lock);

        return 0;
}

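/*
 * Parse "<asid> <0xaddr>" from userspace and latch the pair for mmu_show().
 * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug and
 * the per-device directory follows dev_name(), e.g. hl0):
 *   echo "2 0x1000000000" > /sys/kernel/debug/habanalabs/hl0/mmu
 *   cat /sys/kernel/debug/habanalabs/hl0/mmu
 */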
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct seq_file *s = file->private_data;
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;
        char kbuf[MMU_KBUF_SIZE];
        char *c;
        ssize_t rc;

        if (!hdev->mmu_enable)
                return count;

        if (count > sizeof(kbuf) - 1)
                goto err;
        if (copy_from_user(kbuf, buf, count))
                goto err;
        kbuf[count] = 0;

        c = strchr(kbuf, ' ');
        if (!c)
                goto err;
        *c = '\0';

        rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
        if (rc)
                goto err;

        if (strncmp(c+1, "0x", 2))
                goto err;
        rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
        if (rc)
                goto err;

        return count;

err:
        dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

        return -EINVAL;
}

static int engines_show(struct seq_file *s, void *data)
{
        struct hl_debugfs_entry *entry = s->private;
        struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
        struct hl_device *hdev = dev_entry->hdev;

        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't check device idle during reset\n");
                return 0;
        }

        hdev->asic_funcs->is_device_idle(hdev, NULL, s);

        return 0;
}

static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;

        if (!hdev->mmu_enable)
                goto out;

        if (hdev->dram_supports_virtual_memory &&
                (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
                return true;

        if (addr >= prop->pmmu.start_addr &&
                addr < prop->pmmu.end_addr)
                return true;

        if (addr >= prop->pmmu_huge.start_addr &&
                addr < prop->pmmu_huge.end_addr)
                return true;
out:
        return false;
}

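/*
 * Translate a device virtual address to a physical address by walking the
 * page tables of the current compute context, mirroring the lookup done in
 * mmu_show(). Used by the data32/data64 accessors when the target address
 * falls inside a device VA range (see hl_is_device_va() above).
 */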
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
                                u64 *phys_addr)
{
        struct hl_ctx *ctx = hdev->compute_ctx;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_mmu_properties *mmu_prop;
        u64 hop_addr, hop_pte_addr, hop_pte;
        u64 offset_mask = HOP4_MASK | FLAGS_MASK;
        int rc = 0;
        bool is_dram_addr;

        if (!ctx) {
                dev_err(hdev->dev, "no ctx available\n");
                return -EINVAL;
        }

        is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
                                                prop->dmmu.start_addr,
                                                prop->dmmu.end_addr);

        /* shifts and masks are the same in PMMU and HPMMU, use one of them */
        mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

        mutex_lock(&ctx->mmu_lock);

        /* hop 0 */
        hop_addr = get_hop0_addr(ctx);
        hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
        hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

        /* hop 1 */
        hop_addr = get_next_hop_addr(hop_pte);
        if (hop_addr == ULLONG_MAX)
                goto not_mapped;
        hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
        hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

        /* hop 2 */
        hop_addr = get_next_hop_addr(hop_pte);
        if (hop_addr == ULLONG_MAX)
                goto not_mapped;
        hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
        hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

        /* hop 3 */
        hop_addr = get_next_hop_addr(hop_pte);
        if (hop_addr == ULLONG_MAX)
                goto not_mapped;
        hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
        hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

        if (!(hop_pte & LAST_MASK)) {
                /* hop 4 */
                hop_addr = get_next_hop_addr(hop_pte);
                if (hop_addr == ULLONG_MAX)
                        goto not_mapped;
                hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
                                                        virt_addr);
                hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

                offset_mask = FLAGS_MASK;
        }

        if (!(hop_pte & PAGE_PRESENT_MASK))
                goto not_mapped;

        *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);

        goto out;

not_mapped:
        dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
                        virt_addr);
        rc = -EINVAL;
out:
        mutex_unlock(&ctx->mmu_lock);
        return rc;
}

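/*
 * Register/memory access nodes: the target address comes from the "addr"
 * node and device virtual addresses are translated to physical first.
 * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug and
 * the per-device directory follows dev_name(), e.g. hl0):
 *   echo 0x<address> > /sys/kernel/debug/habanalabs/hl0/addr
 *   cat /sys/kernel/debug/habanalabs/hl0/data32
 *   echo 0x<value> > /sys/kernel/debug/habanalabs/hl0/data32
 */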
static ssize_t hl_data_read32(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[32];
        u64 addr = entry->addr;
        u32 val;
        ssize_t rc;

        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
                return 0;
        }

        if (*ppos)
                return 0;

        if (hl_is_device_va(hdev, addr)) {
                rc = device_va_to_pa(hdev, addr, &addr);
                if (rc)
                        return rc;
        }

        rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
        if (rc) {
                dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
                return rc;
        }

        sprintf(tmp_buf, "0x%08x\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                        strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
        u32 value;
        ssize_t rc;

        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
                return 0;
        }

        rc = kstrtouint_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        if (hl_is_device_va(hdev, addr)) {
                rc = device_va_to_pa(hdev, addr, &addr);
                if (rc)
                        return rc;
        }

        rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
        if (rc) {
                dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
                        value, addr);
                return rc;
        }

        return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[32];
        u64 addr = entry->addr;
        u64 val;
        ssize_t rc;

        if (*ppos)
                return 0;

        if (hl_is_device_va(hdev, addr)) {
                rc = device_va_to_pa(hdev, addr, &addr);
                if (rc)
                        return rc;
        }

        rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
        if (rc) {
                dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
                return rc;
        }

        sprintf(tmp_buf, "0x%016llx\n", val);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                        strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 addr = entry->addr;
        u64 value;
        ssize_t rc;

        rc = kstrtoull_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        if (hl_is_device_va(hdev, addr)) {
                rc = device_va_to_pa(hdev, addr, &addr);
                if (rc)
                        return rc;
        }

        rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
        if (rc) {
                dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
                        value, addr);
                return rc;
        }

        return count;
}

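/*
 * PCI power-state control. Reading reports the current state; writing 1
 * brings the device to D0 and writing 2 puts it in D3hot, as implemented in
 * hl_set_power_state() below.
 */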
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
                size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        int i;

        if (*ppos)
                return 0;

        if (hdev->pdev->current_state == PCI_D0)
                i = 1;
        else if (hdev->pdev->current_state == PCI_D3hot)
                i = 2;
        else
                i = 3;

        sprintf(tmp_buf,
                "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
        return simple_read_from_buffer(buf, count, ppos, tmp_buf,
                        strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        if (value == 1) {
                pci_set_power_state(hdev->pdev, PCI_D0);
                pci_restore_state(hdev->pdev);
                rc = pci_enable_device(hdev->pdev);
        } else if (value == 2) {
                pci_save_state(hdev->pdev);
                pci_disable_device(hdev->pdev);
                pci_set_power_state(hdev->pdev, PCI_D3hot);
        } else {
                dev_dbg(hdev->dev, "invalid power state value %u\n", value);
                return -EINVAL;
        }

        return count;
}

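/*
 * I2C access through the device CPU. The bus, target address and register
 * are selected via the "i2c_bus", "i2c_addr" and "i2c_reg" nodes; reading or
 * writing "i2c_data" then performs the actual transaction.
 */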
static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[32];
        long val;
        ssize_t rc;

        if (*ppos)
                return 0;

        rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
                        entry->i2c_reg, &val);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to read from I2C bus %d, addr %d, reg %d\n",
                        entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
                return rc;
        }

        sprintf(tmp_buf, "0x%02lx\n", val);
        rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
                        strlen(tmp_buf));

        return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
                        entry->i2c_reg, value);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
                        value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
                return rc;
        }

        return count;
}

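/*
 * LED control nodes. Any non-zero value turns the corresponding LED on and
 * zero turns it off, e.g. (illustrative path): echo 1 > .../hl0/led0
 */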
static ssize_t hl_led0_write(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 0, value);

        return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 1, value);

        return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        value = value ? 1 : 0;

        hl_debugfs_led_set(hdev, 2, value);

        return count;
}

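/*
 * Device control node. Reading lists the accepted keywords; writing one of
 * them toggles the matching driver state or invokes the matching ASIC
 * callback, e.g. (illustrative path): echo suspend > .../hl0/device
 */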
static ssize_t hl_device_read(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        static const char *help =
                "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
        return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

static ssize_t hl_device_write(struct file *f, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char data[30] = {0};

        /* don't allow partial writes */
        if (*ppos != 0)
                return 0;

        simple_write_to_buffer(data, 29, ppos, buf, count);

        if (strncmp("disable", data, strlen("disable")) == 0) {
                hdev->disabled = true;
        } else if (strncmp("enable", data, strlen("enable")) == 0) {
                hdev->disabled = false;
        } else if (strncmp("suspend", data, strlen("suspend")) == 0) {
                hdev->asic_funcs->suspend(hdev);
        } else if (strncmp("resume", data, strlen("resume")) == 0) {
                hdev->asic_funcs->resume(hdev);
        } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
                hdev->device_cpu_disabled = true;
        } else {
                dev_err(hdev->dev,
                        "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
                count = -EINVAL;
        }

        return count;
}

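/*
 * Clock-gating control. The node exposes hdev->clock_gating_mask as a hex
 * bitmask; writing a new mask re-applies it through the ASIC's
 * set_clock_gating() callback.
 */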
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        ssize_t rc;

        if (*ppos)
                return 0;

        sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
        rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
                        strlen(tmp_buf) + 1);

        return rc;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u64 value;
        ssize_t rc;

        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't change clock gating during reset\n");
                return 0;
        }

        rc = kstrtoull_from_user(buf, count, 16, &value);
        if (rc)
                return rc;

        hdev->clock_gating_mask = value;
        hdev->asic_funcs->set_clock_gating(hdev);

        return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
                                        size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        char tmp_buf[200];
        ssize_t rc;

        if (*ppos)
                return 0;

        sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
        rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
                        strlen(tmp_buf) + 1);

        return rc;
}

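/*
 * Writing stop_on_err updates the flag and then resets the device so the new
 * setting takes effect.
 */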
static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
        struct hl_device *hdev = entry->hdev;
        u32 value;
        ssize_t rc;

        if (atomic_read(&hdev->in_reset)) {
                dev_warn_ratelimited(hdev->dev,
                                "Can't change stop on error during reset\n");
                return 0;
        }

        rc = kstrtouint_from_user(buf, count, 10, &value);
        if (rc)
                return rc;

        hdev->stop_on_err = value ? 1 : 0;

        hl_device_reset(hdev, false, false);

        return count;
}

static const struct file_operations hl_data32b_fops = {
        .owner = THIS_MODULE,
        .read = hl_data_read32,
        .write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
        .owner = THIS_MODULE,
        .read = hl_data_read64,
        .write = hl_data_write64
};

static const struct file_operations hl_i2c_data_fops = {
        .owner = THIS_MODULE,
        .read = hl_i2c_data_read,
        .write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
        .owner = THIS_MODULE,
        .read = hl_get_power_state,
        .write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
        .owner = THIS_MODULE,
        .write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
        .owner = THIS_MODULE,
        .write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
        .owner = THIS_MODULE,
        .write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
        .owner = THIS_MODULE,
        .read = hl_device_read,
        .write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
        .owner = THIS_MODULE,
        .read = hl_clk_gate_read,
        .write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
        .owner = THIS_MODULE,
        .read = hl_stop_on_err_read,
        .write = hl_stop_on_err_write
};

static const struct hl_info_list hl_debugfs_list[] = {
        {"command_buffers", command_buffers_show, NULL},
        {"command_submission", command_submission_show, NULL},
        {"command_submission_jobs", command_submission_jobs_show, NULL},
        {"userptr", userptr_show, NULL},
        {"vm", vm_show, NULL},
        {"mmu", mmu_show, mmu_asid_va_write},
        {"engines", engines_show, NULL}
};

static int hl_debugfs_open(struct inode *inode, struct file *file)
{
        struct hl_debugfs_entry *node = inode->i_private;

        return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct hl_debugfs_entry *node = file->f_inode->i_private;

        if (node->info_ent->write)
                return node->info_ent->write(file, buf, count, f_pos);
        else
                return -EINVAL;

}

static const struct file_operations hl_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = hl_debugfs_open,
        .read = seq_read,
        .write = hl_debugfs_write,
        .llseek = seq_lseek,
        .release = single_release,
};

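/*
 * Create the per-device debugfs directory under the "habanalabs" root and
 * populate it with the control nodes above plus one read-only entry for each
 * element of hl_debugfs_list.
 */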
void hl_debugfs_add_device(struct hl_device *hdev)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
        int count = ARRAY_SIZE(hl_debugfs_list);
        struct hl_debugfs_entry *entry;
        struct dentry *ent;
        int i;

        dev_entry->hdev = hdev;
        dev_entry->entry_arr = kmalloc_array(count,
                                        sizeof(struct hl_debugfs_entry),
                                        GFP_KERNEL);
        if (!dev_entry->entry_arr)
                return;

        INIT_LIST_HEAD(&dev_entry->file_list);
        INIT_LIST_HEAD(&dev_entry->cb_list);
        INIT_LIST_HEAD(&dev_entry->cs_list);
        INIT_LIST_HEAD(&dev_entry->cs_job_list);
        INIT_LIST_HEAD(&dev_entry->userptr_list);
        INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
        mutex_init(&dev_entry->file_mutex);
        spin_lock_init(&dev_entry->cb_spinlock);
        spin_lock_init(&dev_entry->cs_spinlock);
        spin_lock_init(&dev_entry->cs_job_spinlock);
        spin_lock_init(&dev_entry->userptr_spinlock);
        spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);

        dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
                                                hl_debug_root);

        debugfs_create_x64("addr",
                                0644,
                                dev_entry->root,
                                &dev_entry->addr);

        debugfs_create_file("data32",
                                0644,
                                dev_entry->root,
                                dev_entry,
                                &hl_data32b_fops);

        debugfs_create_file("data64",
                                0644,
                                dev_entry->root,
                                dev_entry,
                                &hl_data64b_fops);

        debugfs_create_file("set_power_state",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_power_fops);

        debugfs_create_u8("i2c_bus",
                                0644,
                                dev_entry->root,
                                &dev_entry->i2c_bus);

        debugfs_create_u8("i2c_addr",
                                0644,
                                dev_entry->root,
                                &dev_entry->i2c_addr);

        debugfs_create_u8("i2c_reg",
                                0644,
                                dev_entry->root,
                                &dev_entry->i2c_reg);

        debugfs_create_file("i2c_data",
                                0644,
                                dev_entry->root,
                                dev_entry,
                                &hl_i2c_data_fops);

        debugfs_create_file("led0",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_led0_fops);

        debugfs_create_file("led1",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_led1_fops);

        debugfs_create_file("led2",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_led2_fops);

        debugfs_create_file("device",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_device_fops);

        debugfs_create_file("clk_gate",
                                0200,
                                dev_entry->root,
                                dev_entry,
                                &hl_clk_gate_fops);

        debugfs_create_file("stop_on_err",
                                0644,
                                dev_entry->root,
                                dev_entry,
                                &hl_stop_on_err_fops);

        for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {

                ent = debugfs_create_file(hl_debugfs_list[i].name,
                                        0444,
                                        dev_entry->root,
                                        entry,
                                        &hl_debugfs_fops);
                entry->dent = ent;
                entry->info_ent = &hl_debugfs_list[i];
                entry->dev_entry = dev_entry;
        }
}

void hl_debugfs_remove_device(struct hl_device *hdev)
{
        struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;

        debugfs_remove_recursive(entry->root);

        mutex_destroy(&entry->file_mutex);
        kfree(entry->entry_arr);
}

void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
        struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

        mutex_lock(&dev_entry->file_mutex);
        list_add(&hpriv->debugfs_list, &dev_entry->file_list);
        mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
        struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

        mutex_lock(&dev_entry->file_mutex);
        list_del(&hpriv->debugfs_list);
        mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
        struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

        spin_lock(&dev_entry->cb_spinlock);
        list_add(&cb->debugfs_list, &dev_entry->cb_list);
        spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
        struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

        spin_lock(&dev_entry->cb_spinlock);
        list_del(&cb->debugfs_list);
        spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
        struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_spinlock);
        list_add(&cs->debugfs_list, &dev_entry->cs_list);
        spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
        struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_spinlock);
        list_del(&cs->debugfs_list);
        spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_job_spinlock);
        list_add(&job->debugfs_list, &dev_entry->cs_job_list);
        spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->cs_job_spinlock);
        list_del(&job->debugfs_list);
        spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->userptr_spinlock);
        list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
        spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
                                struct hl_userptr *userptr)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->userptr_spinlock);
        list_del(&userptr->debugfs_list);
        spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->ctx_mem_hash_spinlock);
        list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
        spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
        struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

        spin_lock(&dev_entry->ctx_mem_hash_spinlock);
        list_del(&ctx->debugfs_list);
        spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void __init hl_debugfs_init(void)
{
        hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
        debugfs_remove_recursive(hl_debug_root);
}