linux/drivers/misc/habanalabs/common/irq.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule the handling
 *                      of an EQ entry and the cpucp_reset event
 *
 * @eq_work:          work object to run when an EQ entry is received
 * @hdev:             pointer to device structure
 * @eq_entry:         copy of the EQ entry
 */
struct hl_eqe_work {
        struct work_struct      eq_work;
        struct hl_device        *hdev;
        struct hl_eq_entry      eq_entry;
};
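
/*
 * A short note on the deferral pattern (inferred from this file): the EQ
 * interrupt handler runs in interrupt context, where sleeping is not
 * allowed, so it only copies the entry into a GFP_ATOMIC-allocated
 * hl_eqe_work and queues it on hdev->eq_wq. The ASIC-specific handle_eqe()
 * callback then runs later from irq_handle_eqe() in process context.
 */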

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
        ptr++;
        if (unlikely(ptr == HL_CQ_LENGTH))
                ptr = 0;
        return ptr;
}
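
/*
 * Equivalence note (an assumption, not taken from this file): if
 * HL_CQ_LENGTH happens to be a power of two, the wraparound above could
 * also be written branch-free as
 *
 *      ptr = (ptr + 1) & (HL_CQ_LENGTH - 1);
 *
 * The compare-and-reset form used here makes no power-of-two assumption
 * and works for any queue length.
 */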

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
        ptr++;
        if (unlikely(ptr == HL_EQ_LENGTH))
                ptr = 0;
        return ptr;
}

static void irq_handle_eqe(struct work_struct *work)
{
        struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
                                                        eq_work);
        struct hl_device *hdev = eqe_work->hdev;

        hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

        kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
        struct hl_cq *cq = arg;
        struct hl_device *hdev = cq->hdev;
        struct hl_hw_queue *queue;
        struct hl_cs_job *job;
        bool shadow_index_valid;
        u16 shadow_index;
        struct hl_cq_entry *cq_entry, *cq_base;

        if (hdev->disabled) {
                dev_dbg(hdev->dev,
                        "Device disabled but received IRQ %d for CQ %d\n",
                        irq, cq->hw_queue_id);
                return IRQ_HANDLED;
        }

        cq_base = cq->kernel_address;

        while (1) {
                bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
                                        CQ_ENTRY_READY_MASK)
                                                >> CQ_ENTRY_READY_SHIFT);

                if (!entry_ready)
                        break;

                cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

                /* Make sure we read CQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
                                        CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
                                        >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

                shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
                                        CQ_ENTRY_SHADOW_INDEX_MASK)
                                        >> CQ_ENTRY_SHADOW_INDEX_SHIFT);

                queue = &hdev->kernel_queues[cq->hw_queue_id];

                if ((shadow_index_valid) && (!hdev->disabled)) {
                        job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
                        queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
                }

                atomic_inc(&queue->ci);

                /* Clear CQ entry ready bit */
                cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
                                                ~CQ_ENTRY_READY_MASK);

                cq->ci = hl_cq_inc_ptr(cq->ci);

                /* Increment free slots */
                atomic_inc(&cq->free_slots_cnt);
        }

        return IRQ_HANDLED;
}

static void handle_user_cq(struct hl_device *hdev,
                        struct hl_user_interrupt *user_cq)
{
        struct hl_user_pending_interrupt *pend;

        spin_lock(&user_cq->wait_list_lock);
        list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
                complete_all(&pend->fence.completion);
        spin_unlock(&user_cq->wait_list_lock);
}
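
/*
 * A note on handle_user_cq() (partly inferred): the user-space wait path
 * elsewhere in the driver enqueues hl_user_pending_interrupt nodes on
 * user_cq->wait_list_head and blocks on pend->fence.completion, so the
 * complete_all() call wakes every waiter currently registered on this
 * interrupt at once.
 */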

/**
 * hl_irq_handler_user_cq - irq handler for user completion queues
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
{
        struct hl_user_interrupt *user_cq = arg;
        struct hl_device *hdev = user_cq->hdev;

        dev_dbg(hdev->dev,
                "got user completion interrupt id %u",
                user_cq->interrupt_id);

        /* Handle user cq interrupts registered on all interrupts */
        handle_user_cq(hdev, &hdev->common_user_interrupt);

        /* Handle user cq interrupts registered on this specific interrupt */
        handle_user_cq(hdev, user_cq);

        return IRQ_HANDLED;
}

/**
 * hl_irq_handler_default - default irq handler
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
        struct hl_user_interrupt *user_interrupt = arg;
        struct hl_device *hdev = user_interrupt->hdev;
        u32 interrupt_id = user_interrupt->interrupt_id;

        dev_err(hdev->dev,
                "got invalid user interrupt %u",
                interrupt_id);

        return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
        struct hl_eq *eq = arg;
        struct hl_device *hdev = eq->hdev;
        struct hl_eq_entry *eq_entry;
        struct hl_eq_entry *eq_base;
        struct hl_eqe_work *handle_eqe_work;
        bool entry_ready;
        u32 cur_eqe;
        u16 cur_eqe_index;

        eq_base = eq->kernel_address;

        while (1) {
                cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
                entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

                if (!entry_ready)
                        break;

                cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
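
                /*
                 * Sanity check on the EQE sequence index: each entry's ctl
                 * word carries an index, and when firmware index checking
                 * is enabled, a ready entry whose index is not
                 * (prev_eqe_index + 1) masked by EQ_CTL_INDEX_MASK means
                 * the driver and device are out of sync, so stop consuming
                 * entries here.
                 */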
                if ((hdev->event_queue.check_eqe_index) &&
                                (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
                                                        != cur_eqe_index)) {
                        dev_dbg(hdev->dev,
                                "EQE 0x%x in queue is ready but index does not match %d!=%d",
                                eq_base[eq->ci].hdr.ctl,
                                ((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
                                cur_eqe_index);
                        break;
                }

                eq->prev_eqe_index++;

                eq_entry = &eq_base[eq->ci];

                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                if (hdev->disabled) {
                        dev_warn(hdev->dev,
                                "Device disabled but received IRQ %d for EQ\n",
                                        irq);
                        goto skip_irq;
                }

                handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
                if (handle_eqe_work) {
                        INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
                        handle_eqe_work->hdev = hdev;

                        memcpy(&handle_eqe_work->eq_entry, eq_entry,
                                        sizeof(*eq_entry));

                        queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
                }
skip_irq:
                /* Clear EQ entry ready bit */
                eq_entry->hdr.ctl =
                        cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
                                                        ~EQ_CTL_READY_MASK);

                eq->ci = hl_eq_inc_ptr(eq->ci);

                hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
        }

        return IRQ_HANDLED;
}
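
/*
 * A minimal sketch (not from this file) of how an ASIC back-end would
 * typically register one of these handlers with the generic IRQ layer;
 * the vector index and name are illustrative only:
 *
 *      rc = request_irq(pci_irq_vector(hdev->pdev, cq_irq_vector),
 *                       hl_irq_handler_cq, 0, "hl-completion-queue",
 *                       &hdev->completion_queue[i]);
 *      if (rc)
 *              dev_err(hdev->dev, "Failed to request CQ IRQ\n");
 *
 * The void *arg passed to request_irq() is handed back to the handler,
 * which is why each handler above starts by casting arg to its own
 * queue type.
 */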

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
        void *p;

        p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
                                &q->bus_address, GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->hdev = hdev;
        q->kernel_address = p;
        q->hw_queue_id = hw_queue_id;
        q->ci = 0;
        q->pi = 0;

        atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

        return 0;
}
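
/*
 * A note on free_slots_cnt (based on its use in this file): it starts at
 * HL_CQ_LENGTH here and is incremented once per consumed entry in
 * hl_irq_handler_cq(); the matching decrement is done on the submission
 * path elsewhere in the driver, so the counter tracks how many CQ slots
 * remain available to the device.
 */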

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
        hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
                                                 q->kernel_address,
                                                 q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
        q->ci = 0;
        q->pi = 0;

        atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

        /*
         * It's not enough to just reset the PI/CI because the H/W may have
         * written valid completion entries before it was halted and therefore
         * we need to clean the actual queues so we won't process old entries
         * when the device is operational again
         */

        memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
        void *p;

        p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
                                                        HL_EQ_SIZE_IN_BYTES,
                                                        &q->bus_address);
        if (!p)
                return -ENOMEM;

        q->hdev = hdev;
        q->kernel_address = p;
        q->ci = 0;
        q->prev_eqe_index = 0;

        return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
        flush_workqueue(hdev->eq_wq);

        hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
                                        HL_EQ_SIZE_IN_BYTES,
                                        q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
        q->ci = 0;
        q->prev_eqe_index = 0;

        /*
         * It's not enough to just reset the PI/CI because the H/W may have
         * written valid completion entries before it was halted and therefore
         * we need to clean the actual queues so we won't process old entries
         * when the device is operational again
         */

        memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}