// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

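/*
 * Record for fault processing deferred from the interrupt handler: the
 * work item carries the faulting address and the owning device to a
 * workqueue context.
 */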
enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

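/*
 * Recover from a software-initiated halt: reset the device, replay the
 * saved configuration, re-enable the device, and bring back every
 * workqueue that was enabled before the halt. On any failure the device
 * state is cleared so it is left cleanly disabled rather than
 * half-configured.
 */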
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(wq_confdev(wq)));
			}
		}
	}

	return;

 out:
	idxd_device_clear_state(idxd);
}

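/*
 * Handle the non-completion interrupt causes: latch and acknowledge the
 * software error registers (waking any user workqueues waiting on error
 * events), complete device commands, and check whether the device has
 * entered the HALT state. Returns -ENXIO when the halt cannot be
 * recovered by the driver.
 */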
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver doesn't utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		val |= IDXD_INTC_PERFMON_OVFL;
		perfmon_counter_overflow(idxd);
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * A software reset can be recovered by the driver:
			 * defer the re-initialization to a workqueue so that
			 * interrupts for device command completions can
			 * still be serviced while the reset runs.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock(&idxd->dev_lock);
			idxd_wqs_quiesce(idxd);
			idxd_wqs_unmap_portal(idxd);
			idxd_device_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}

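/*
 * Threaded handler for the misc interrupt vector. The cause bits are
 * read and acknowledged before processing, then re-read in a loop so
 * causes raised while the handler was running are not lost.
 */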
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	return IRQ_HANDLED;
}

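/*
 * Drain the lockless pending list: descriptors that already carry a
 * completion status are completed immediately; the rest are moved to
 * the work list to be checked on the next interrupt.
 */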
static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;

	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (status) {
			/*
			 * Check against the original status as ABORT is
			 * software defined and 0xff, which
			 * DSA_COMP_STATUS_MASK can mask out.
			 */
			if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
				complete_desc(desc, IDXD_COMPLETE_ABORT);
				continue;
			}

			complete_desc(desc, IDXD_COMPLETE_NORMAL);
		} else {
			spin_lock(&irq_entry->list_lock);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock(&irq_entry->list_lock);
		}
	}
}

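/*
 * Walk the work list under the list lock, collecting descriptors whose
 * completion status has been written by the hardware onto a private
 * list, then complete them with the lock dropped.
 */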
static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;

	/*
	 * This lock protects the list from corruption by accesses that
	 * happen outside of the irq handler thread.
	 */
	spin_lock(&irq_entry->list_lock);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock(&irq_entry->list_lock);
		return;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status) {
			list_del(&desc->list);
			list_add_tail(&desc->list, &flist);
		}
	}

	spin_unlock(&irq_entry->list_lock);

	/*
	 * complete_desc() hands the descriptor back to its submitter, so
	 * walk the private list with the safe iterator and unlink each
	 * entry before completing it.
	 */
	list_for_each_entry_safe(desc, n, &flist, list) {
		/*
		 * Check against the original status as ABORT is software
		 * defined and 0xff, which DSA_COMP_STATUS_MASK can mask out.
		 */
		list_del(&desc->list);
		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
			complete_desc(desc, IDXD_COMPLETE_ABORT);
			continue;
		}

		complete_desc(desc, IDXD_COMPLETE_NORMAL);
	}
}

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;

	/*
	 * Two lists are processed here. The pending_llist is a lockless
	 * singly linked list where submitters add each descriptor after
	 * sending it to the device. The work_list is an ordinary doubly
	 * linked list holding descriptors still awaiting completion. This
	 * is a multiple-producer, single-consumer scenario: the producers
	 * are the kernel submitters of descriptors and the consumer is
	 * this interrupt handler thread. The work_list is drained first,
	 * then the pending_llist; descriptors found incomplete on the
	 * pending_llist are parked on the work_list for the next pass.
	 */
	irq_process_work_list(irq_entry);
	irq_process_pending_llist(irq_entry);

	return IRQ_HANDLED;
}