linux/drivers/dma/idxd/submit.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

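/*
 * Claim the descriptor slot at @idx: wipe the hardware descriptor and the
 * completion record left over from the previous use, remember which CPU the
 * slot was allocated on so idxd_free_desc() can return it with the same
 * hint, and fill in the PASID and interrupt handle the device needs.
 */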
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
        struct idxd_desc *desc;
        struct idxd_device *idxd = wq->idxd;

        desc = wq->descs[idx];
        memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
        memset(desc->completion, 0, idxd->data->compl_size);
        desc->cpu = cpu;

        if (device_pasid_enabled(idxd))
                desc->hw->pasid = idxd->pasid;

        /*
         * On the host, MSIX vector 0 is used for misc interrupts. Therefore,
         * when we match vectors 1:1 to WQ ids, we need to add 1 to the WQ id.
         */
        if (!idxd->int_handles)
                desc->hw->int_handle = wq->id + 1;
        else
                desc->hw->int_handle = idxd->int_handles[wq->id];

        return desc;
}

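/*
 * Allocate a descriptor slot from the wq's sbitmap_queue. With
 * IDXD_OP_NONBLOCK the caller gets -EAGAIN when no slot is free; otherwise
 * the caller sleeps interruptibly until one is released by idxd_free_desc().
 */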
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
        int cpu, idx;
        struct idxd_device *idxd = wq->idxd;
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        struct sbitmap_queue *sbq;

        if (idxd->state != IDXD_DEV_ENABLED)
                return ERR_PTR(-EIO);

        sbq = &wq->sbq;
        idx = sbitmap_queue_get(sbq, &cpu);
        if (idx >= 0)
                return __get_desc(wq, idx, cpu);
        if (optype == IDXD_OP_NONBLOCK)
                return ERR_PTR(-EAGAIN);

        ws = &sbq->ws[0];
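        /*
         * Standard sbitmap wait loop: register on the wait queue before
         * retrying the allocation, so a slot freed between the failed
         * sbitmap_queue_get() above and the sleep is not missed, and bail
         * out if a signal is pending.
         */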
        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
                if (signal_pending_state(TASK_INTERRUPTIBLE, current))
                        break;
                idx = sbitmap_queue_get(sbq, &cpu);
                if (idx >= 0)
                        break;
                schedule();
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        if (idx < 0)
                return ERR_PTR(-EAGAIN);

        return __get_desc(wq, idx, cpu);
}

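/*
 * Return a descriptor slot to the wq's sbitmap_queue, which also wakes any
 * allocator sleeping in idxd_alloc_desc().
 */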
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        int cpu = desc->cpu;

        desc->cpu = -1;
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

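/*
 * Search the irq_entry's work list for the descriptor being aborted and,
 * if it is there, unlink it so the caller can complete it with an abort
 * status.
 */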
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                                         struct idxd_desc *desc)
{
        struct idxd_desc *d, *n;

        lockdep_assert_held(&ie->list_lock);
        list_for_each_entry_safe(d, n, &ie->work_list, list) {
                if (d == desc) {
                        list_del(&d->list);
                        return d;
                }
        }

        /*
         * At this point, the desc that needs to be aborted is held by the
         * completion handler, which has taken it off the pending list but
         * has not yet added it to the work list. It will be cleaned up by
         * the interrupt handler when it sees IDXD_COMP_DESC_ABORT as the
         * completion status.
         */
        return NULL;
}

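/*
 * Abort a descriptor that may sit on either the lockless pending llist or
 * the locked work list. The completion status is stamped with
 * IDXD_COMP_DESC_ABORT up front so the irq thread treats the descriptor as
 * aborted if it wins the race for it.
 */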
static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                             struct idxd_desc *desc)
{
        struct idxd_desc *d, *t, *found = NULL;
        struct llist_node *head;

        desc->completion->status = IDXD_COMP_DESC_ABORT;
        /*
         * Grab the list lock so it will block the irq thread handler. This
         * allows the abort code to locate the descriptor that needs to be
         * aborted.
         */
        spin_lock(&ie->list_lock);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(d, t, head, llnode) {
                        if (d == desc) {
                                found = desc;
                                continue;
                        }
                        /* requeue every other pending descriptor to the work list */
                        list_add_tail(&d->list, &ie->work_list);
                }
        }

        if (!found)
                found = list_abort_desc(wq, ie, desc);
        spin_unlock(&ie->list_lock);

        if (found)
                complete_desc(found, IDXD_COMPLETE_ABORT);
}

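/*
 * Submit a previously allocated descriptor to the device through the wq
 * portal. On any failure the descriptor is consumed (freed, or handed to
 * the abort path), so the caller must not touch it after a non-zero return.
 */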
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = NULL;
        void __iomem *portal;
        int rc;

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd_free_desc(wq, desc);
                return -EIO;
        }

        if (!percpu_ref_tryget_live(&wq->wq_active)) {
                idxd_free_desc(wq, desc);
                return -ENXIO;
        }

        portal = idxd_wq_portal_addr(wq);

        /*
         * The wmb() flushes writes to coherent DMA data before
         * possibly triggering a DMA read. The wmb() is necessary
         * even on UP because the recipient is a device.
         */
        wmb();

        /*
         * Add the descriptor to the lockless pending list for the irq_entry
         * that we designated the descriptor to, before handing it to the
         * device, so the completion handler can always find it.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
                ie = &idxd->irq_entries[wq->id + 1];
                llist_add(&desc->llnode, &ie->pending_llist);
        }

        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
                /*
                 * It's not likely that we would receive a queue-full
                 * rejection, since descriptor allocation gates at the wq
                 * size. If we do receive -EAGAIN, something went wrong,
                 * such as the device not accepting descriptors at all.
                 */
                rc = enqcmds(portal, desc->hw);
                if (rc < 0) {
                        percpu_ref_put(&wq->wq_active);
                        /* abort operation frees the descriptor */
                        if (ie)
                                llist_abort_desc(wq, ie, desc);
                        else
                                idxd_free_desc(wq, desc);
                        return rc;
                }
        }

        percpu_ref_put(&wq->wq_active);
        return 0;
}
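
/*
 * Illustrative caller sketch (not part of the original file): how the
 * allocation and submission entry points above are typically paired.
 * The dsa_hw_desc field names come from uapi/linux/idxd.h; src_dma,
 * dst_dma and len are assumed, caller-supplied DMA addresses and a
 * transfer size, and error unwinding is elided:
 *
 *      desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *      if (IS_ERR(desc))
 *              return PTR_ERR(desc);
 *
 *      desc->hw->opcode = DSA_OPCODE_MEMMOVE;
 *      desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
 *                        IDXD_OP_FLAG_RCI;
 *      desc->hw->src_addr = src_dma;
 *      desc->hw->dst_addr = dst_dma;
 *      desc->hw->xfer_size = len;
 *      desc->hw->completion_addr = desc->compl_dma;
 *
 *      rc = idxd_submit_desc(wq, desc);   // consumes desc on failure
 */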