// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static const struct pci_device_id idxd_pci_tbl[] = {
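	/* DSA ver 1.0 platforms */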
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		rc = msixcnt;
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
					  sizeof(struct msix_entry), GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

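	/*
	 * One completion list is set up per MSI-X entry, except for
	 * entry 0, which is reserved for device errors and misc events.
	 */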
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

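	/* first MSI-X entry is not used for work queue interrupts */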
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
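	/* disable error interrupt generation and release MSI-X vectors */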
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		wq->idxd_cdev.minor = -1;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

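	/* per-device workqueue used to defer work from irq context */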
	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

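	/* read generic capability register */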
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

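	/* read group capability register */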
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

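	/* read engine capability register */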
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

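	/* read workqueue capability register */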
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

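	/* read the operation capability registers */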
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

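/*
 * Abort and free any descriptors still sitting on a vector's pending
 * llist when the device is shut down.
 */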
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
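		/* vector 0 is for misc/error events and has no descriptor lists */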
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err, i;

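	/*
	 * The driver submits work to the device with the MOVDIR64B
	 * instruction; without it the device cannot be used, so bail
	 * out early on CPUs that lack it.
	 */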
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);