// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

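/* Interrupt control bits */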
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

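/*
 * Each kernel-owned WQ carries one software descriptor (struct idxd_desc)
 * per hardware descriptor slot, plus a DMA-coherent array of completion
 * records. The helpers below allocate and free those arrays on the
 * device's NUMA node.
 */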
static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

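/*
 * Completion records must start on an idxd->data->align boundary. The
 * allocation below is padded by 'align' bytes, and both the DMA handle
 * and the CPU pointer are rounded up to that boundary, whatever address
 * the allocator returned. E.g. with align = 64 and a raw address of
 * 0x1010, the aligned address is (0x1010 + 63) & ~63 = 0x1040.
 */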
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;
	int align;
	u64 tmp;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	align = idxd->data->align;
	wq->compls_size = num_descs * idxd->data->compl_size + align;
	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
					    &wq->compls_addr_raw, GFP_KERNEL);
	if (!wq->compls_raw) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	/* Round both the DMA handle and the CPU pointer up to 'align' */
	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(u64)(align - 1);
	tmp = (u64)wq->compls_raw;
	tmp = (tmp + (align - 1)) & ~(u64)(align - 1);
	wq->compls = (struct dsa_completion_record *)tmp;

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

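	/*
	 * The WQ command operand packs the target WQ as a one-hot bit within
	 * a block of 16 (bits 15:0) plus the index of that block
	 * (bits 31:16); drain and reset below use the same encoding.
	 */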
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
}

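/*
 * Map the WQ's descriptor submission portal from the device's portal BAR;
 * kernel submitters write descriptors through this mapping. The limited
 * portal offset is used here.
 */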
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

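/*
 * The WQCFG PASID fields can only be changed while the WQ is disabled, so
 * both helpers below disable the WQ, update the WQCFG word that holds
 * pasid/pasid_en, and then re-enable the WQ.
 */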
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->size = 0;
	wq->group = NULL;
	wq->threshold = 0;
	wq->priority = 0;
	wq->ats_dis = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	/* Clear any state left from a previous init/exit cycle of the ref */
	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	return 0;
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	percpu_ref_kill(&wq->wq_active);
	wait_for_completion(&wq->wq_dead);
	percpu_ref_exit(&wq->wq_active);
}

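/* Device control bits */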
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

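/*
 * This function is only used for device reset during probe and polls the
 * command status register for completion. Once the device is set up with
 * interrupts, commands complete via interrupt notification in
 * idxd_cmd_exec().
 */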
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

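	/*
	 * After the command is submitted, release the lock and sleep until
	 * the interrupt handler completes 'done'; the lock is retaken below
	 * to publish the status and wake any waiters.
	 */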
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

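	/* If the command is successful or if the device was enabled */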
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

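	/* If the command is successful or if the device was disabled */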
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

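/* Device configuration bits */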
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_token_limit = false;
		group->tokens_allowed = 0;
		group->tokens_reserved = 0;
		group->tc_a = -1;
		group->tc_b = -1;
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	idxd_groups_clear_state(idxd);
	idxd_engines_clear_state(idxd);
	idxd_device_wqs_clear_state(idxd);
}

void idxd_msix_perm_setup(struct idxd_device *idxd)
{
	union msix_perm mperm;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(idxd->pdev);
	if (msixcnt < 0)
		return;

	mperm.bits = 0;
	mperm.pasid = idxd->pasid;
	mperm.pasid_en = device_pasid_enabled(idxd);
	for (i = 1; i < msixcnt; i++)
		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

void idxd_msix_perm_clear(struct idxd_device *idxd)
{
	union msix_perm mperm;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(idxd->pdev);
	if (msixcnt < 0)
		return;

	mperm.bits = 0;
	for (i = 1; i < msixcnt; i++)
		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* GRPWQCFG registers */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* GRPENGCFG register */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* GRPFLGCFG register */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth token limit */
	if (idxd->token_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.token_limit = idxd->token_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memset()ing the shadow copy of the WQCFG, read it back
	 * from the hardware so that sticky fields some devices keep across
	 * a WQ reset are preserved.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* byte 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}
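	/*
	 * The priv bit is set iff the WQ is kernel type. When PASID is
	 * enabled on a dedicated kernel WQ, privileged access additionally
	 * requires the device's PASID capability to advertise privileged
	 * mode; without it, such a WQ cannot be supported, so the
	 * configuration is rejected below.
	 */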
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

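	/* TC-A 0 and TC-B 1 are the defaults when not configured */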
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_token_limit = group->use_token_limit;
		group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
		if (group->tokens_allowed)
			group->grpcfg.flags.tokens_allowed =
				group->tokens_allowed;
		else
			group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		/* grpcfg.wqs[] holds four u64s covering up to 256 WQ bits */
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but shared wq configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;
	if (wq->wqcfg->priv)
		wq->type = IDXD_WQT_KERNEL;

	/* Only dedicated WQs without PASID can be adopted from a preset config */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

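	/*
	 * Load the WQS bit fields: iterate through the 256 WQ bits, 64 bits
	 * (one GRPWQCFG stride) at a time.
	 */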
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits of this stride */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond the last valid WQ id */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for the WQ if its bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through the engine bits and set group assignment */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->token_limit = reg.token_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

int __drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

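	/* Shared WQ checks */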
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}

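		/*
		 * For a shared WQ, 'threshold' is the number of descriptors
		 * a kernel submitter may use (see idxd_wq_alloc_resources()),
		 * so a threshold of 0 would leave the WQ unusable.
		 */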
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;
	return 0;

err_map_portal:
	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
	int rc;

	mutex_lock(&wq->wq_lock);
	rc = __drv_enable_wq(wq);
	mutex_unlock(&wq->wq_lock);
	return rc;
}

void __drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	wq->client_count = 0;
}

void drv_disable_wq(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

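	/*
	 * The device should be in disabled state for idxd_drv to load. If it
	 * is in enabled state, the device was altered outside of the
	 * driver's control; if it is halted, do not proceed.
	 */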
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0)
		return rc;

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);