linux/drivers/dma/idxd/irq.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

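/*
 * Mark all work queues as disabled in the driver's software state.
 * Caller must hold dev_lock.
 */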
void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                wq->state = IDXD_WQ_DISABLED;
        }
}

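/*
 * Attempt to recover from a device halt caused by a software reset:
 * reset the device, reprogram its configuration, re-enable it, then
 * re-enable every work queue that was enabled before the halt. On any
 * failure the device is left in the HALTED state. Caller must hold
 * dev_lock.
 */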
static int idxd_restart(struct idxd_device *idxd)
{
        int i, rc;

        lockdep_assert_held(&idxd->dev_lock);

        rc = __idxd_device_reset(idxd);
        if (rc < 0)
                goto out;

        rc = idxd_device_config(idxd);
        if (rc < 0)
                goto out;

        rc = idxd_device_enable(idxd);
        if (rc < 0)
                goto out;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                if (wq->state == IDXD_WQ_ENABLED) {
                        rc = idxd_wq_enable(wq);
                        if (rc < 0) {
                                dev_warn(&idxd->pdev->dev,
                                         "Unable to re-enable wq %s\n",
                                         dev_name(&wq->conf_dev));
                        }
                }
        }

        return 0;

 out:
        idxd_device_wqs_clear_state(idxd);
        idxd->state = IDXD_DEV_HALTED;
        return rc;
}

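/*
 * Hard interrupt handler: mask the MSI-X vector that fired and defer all
 * processing to the threaded handler, which unmasks it when done.
 */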
irqreturn_t idxd_irq_handler(int vec, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        struct idxd_device *idxd = irq_entry->idxd;

        idxd_mask_msix_vector(idxd, irq_entry->id);
        return IRQ_WAKE_THREAD;
}

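/*
 * Threaded handler for the miscellaneous (non work queue) interrupt vector.
 * It snapshots and acknowledges the software error registers, wakes any
 * user-type work queue waiters on the error queue, and, if the device
 * reports a halt, either restarts it (software reset) or marks it halted.
 */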
irqreturn_t idxd_misc_thread(int vec, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        struct idxd_device *idxd = irq_entry->idxd;
        struct device *dev = &idxd->pdev->dev;
        union gensts_reg gensts;
        u32 cause, val = 0;
        int i, rc;
        bool err = false;

        cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

        if (cause & IDXD_INTC_ERR) {
                spin_lock_bh(&idxd->dev_lock);
                for (i = 0; i < 4; i++)
                        idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
                                        IDXD_SWERR_OFFSET + i * sizeof(u64));
                iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

                if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
                        int id = idxd->sw_err.wq_idx;
                        struct idxd_wq *wq = &idxd->wqs[id];

                        if (wq->type == IDXD_WQT_USER)
                                wake_up_interruptible(&wq->idxd_cdev.err_queue);
                } else {
                        int i;

                        for (i = 0; i < idxd->max_wqs; i++) {
                                struct idxd_wq *wq = &idxd->wqs[i];

                                if (wq->type == IDXD_WQT_USER)
                                        wake_up_interruptible(&wq->idxd_cdev.err_queue);
                        }
                }

                spin_unlock_bh(&idxd->dev_lock);
                val |= IDXD_INTC_ERR;

                for (i = 0; i < 4; i++)
                        dev_warn(dev, "err[%d]: %#16.16llx\n",
                                 i, idxd->sw_err.bits[i]);
                err = true;
        }

        if (cause & IDXD_INTC_CMD) {
                /* Driver does not use command interrupts */
                val |= IDXD_INTC_CMD;
        }

        if (cause & IDXD_INTC_OCCUPY) {
                /* Driver does not utilize occupancy interrupt */
                val |= IDXD_INTC_OCCUPY;
        }

        if (cause & IDXD_INTC_PERFMON_OVFL) {
                /*
                 * Driver does not utilize perfmon counter overflow interrupt
                 * yet.
                 */
                val |= IDXD_INTC_PERFMON_OVFL;
        }

        val ^= cause;
        if (val)
                dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
                              val);

        iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
        if (!err)
                return IRQ_HANDLED;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
                spin_lock_bh(&idxd->dev_lock);
                if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
                        rc = idxd_restart(idxd);
                        if (rc < 0)
                                dev_err(&idxd->pdev->dev,
                                        "idxd restart failed, device halted.\n");
                } else {
                        idxd_device_wqs_clear_state(idxd);
                        idxd->state = IDXD_DEV_HALTED;
                        dev_err(&idxd->pdev->dev,
                                "idxd halted, need %s.\n",
                                gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
                                "FLR" : "system reset");
                }
                spin_unlock_bh(&idxd->dev_lock);
        }

        idxd_unmask_msix_vector(idxd, irq_entry->id);
        return IRQ_HANDLED;
}

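/*
 * Drain the lockless pending_llist: descriptors whose completion record has
 * been written are completed and freed (counted in *processed); the rest are
 * moved to the work_list. Returns the number of descriptors still pending.
 */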
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                     int *processed)
{
        struct idxd_desc *desc, *t;
        struct llist_node *head;
        int queued = 0;

        head = llist_del_all(&irq_entry->pending_llist);
        if (!head)
                return 0;

        llist_for_each_entry_safe(desc, t, head, llnode) {
                if (desc->completion->status) {
                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
                } else {
                        list_add_tail(&desc->list, &irq_entry->work_list);
                        queued++;
                }
        }

        return queued;
}

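/*
 * Walk the work_list and complete any descriptor whose completion record has
 * been written (counted in *processed). Returns the number of descriptors
 * still outstanding on the list.
 */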
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
                                 int *processed)
{
        struct list_head *node, *next;
        int queued = 0;

        if (list_empty(&irq_entry->work_list))
                return 0;

        list_for_each_safe(node, next, &irq_entry->work_list) {
                struct idxd_desc *desc =
                        container_of(node, struct idxd_desc, list);

                if (desc->completion->status) {
                        list_del(&desc->list);
                        /* process and callback */
                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
                        idxd_free_desc(desc->wq, desc);
                        (*processed)++;
                } else {
                        queued++;
                }
        }

        return queued;
}

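/*
 * Threaded handler for a work queue's completion interrupt vector: reaps
 * completed descriptors from the work and pending lists, then unmasks the
 * vector. Returns IRQ_NONE if nothing was completed.
 */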
irqreturn_t idxd_wq_thread(int irq, void *data)
{
        struct idxd_irq_entry *irq_entry = data;
        int rc, processed = 0, retry = 0;

        /*
         * There are two lists we are processing. The pending_llist is where
         * the submitter adds each descriptor after dispatching it to the
         * work queue. It is a lockless singly linked list. The work_list is
         * a standard Linux doubly linked list. We are in a scenario of
         * multiple producers and a single consumer: the producers are all
         * the kernel submitters of descriptors, and the consumer is the
         * kernel irq handler thread for the MSI-X vector when using threaded
         * irq. To work within the restrictions of llist and remain lockless,
         * we take the following steps:
         * 1. Iterate through the work_list and process any completed
         *    descriptors. Delete the completed entries during iteration.
         * 2. llist_del_all() from the pending list.
         * 3. Iterate through the llist that was deleted from the pending list
         *    and process the completed entries.
         * 4. If an entry is still waiting on hardware, list_add_tail() it to
         *    the work_list.
         * 5. Repeat until no descriptors remain or the retry limit is reached.
         */
        do {
                rc = irq_process_work_list(irq_entry, &processed);
                if (rc != 0) {
                        retry++;
                        continue;
                }

                rc = irq_process_pending_llist(irq_entry, &processed);
        } while (rc != 0 && retry != 10);

        idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

        if (processed == 0)
                return IRQ_NONE;

        return IRQ_HANDLED;
}