// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * One completion list is used per MSI-X entry except for entry 0,
	 * which is reserved for errors and other non I/O completion events.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	idxd_msix_perm_setup(idxd);

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* The first MSI-X entry is not used for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
			/*
			 * MSI-X vector enumeration starts at 1, with vector 0
			 * reserved for the misc interrupt that handles non I/O
			 * completion events. The interrupt handles for the wq
			 * vectors are therefore stored from index 0, i.e. the
			 * handle for vector i lives at int_handles[i - 1].
			 */
			rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (i != 0 && (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
			idxd_device_release_int_handle(idxd,
						       idxd->int_handles[i - 1], IDXD_IRQ_MSIX);
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	idxd_msix_perm_clear(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	irq_entry = &idxd->irq_entries[0];
	free_irq(irq_entry->vector, irq_entry);

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
			idxd_device_release_int_handle(idxd, idxd->int_handles[i - 1],
						       IDXD_IRQ_MSIX);
		free_irq(irq_entry->vector, irq_entry);
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}

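/*
 * Allocate the wq array and one struct idxd_wq per work queue. Each wq
 * carries a conf_dev on the dsa bus; once device_initialize() has run,
 * the object is released through put_device() and the device type's
 * release callback rather than a direct kfree().
 */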
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		wq->id = i;
		wq->idxd = idxd;
		device_initialize(&wq->conf_dev);
		wq->conf_dev.parent = &idxd->conf_dev;
		wq->conf_dev.bus = &dsa_bus_type;
		wq->conf_dev.type = &idxd_wq_device_type;
		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(&wq->conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		engine->id = i;
		engine->idxd = idxd;
		device_initialize(&engine->conf_dev);
		engine->conf_dev.parent = &idxd->conf_dev;
		engine->conf_dev.bus = &dsa_bus_type;
		engine->conf_dev.type = &idxd_engine_device_type;
		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->engines[i]->conf_dev);
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		group->id = i;
		group->idxd = idxd;
		device_initialize(&group->conf_dev);
		group->conf_dev.parent = &idxd->conf_dev;
		group->conf_dev.bus = &dsa_bus_type;
		group->conf_dev.type = &idxd_group_device_type;
		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->groups[i]->conf_dev);
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
		idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
						 dev_to_node(dev));
		if (!idxd->int_handles)
			return -ENOMEM;
	}

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
 err_wqs:
	kfree(idxd->int_handles);
	return rc;
}

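/*
 * Table offsets are reported by hardware in multiples of IDXD_TABLE_MULT;
 * scale them to byte offsets from the MMIO base before use.
 */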
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capability */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capability */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* reading engine capability */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* reading work queue capability */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capability */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->data = data;
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0) {
		kfree(idxd);
		return NULL;
	}

	device_initialize(&idxd->conf_dev);
	idxd->conf_dev.parent = dev;
	idxd->conf_dev.bus = &dsa_bus_type;
	idxd->conf_dev.type = idxd->data->dev_type;
	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

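/*
 * Bind a system (supervisor) PASID so kernel-owned work queues can do DMA
 * with SVA address translation. If no PASID can be obtained, idxd_probe()
 * simply continues without SVA support.
 */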
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(&idxd->conf_dev);
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

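/*
 * Descriptors still sitting on a vector's pending llist or work list at
 * shutdown never completed in hardware; complete them as aborted so their
 * submitters can clean up.
 */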
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

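/**
 * idxd_wqs_quiesce() - Quiesce all enabled kernel work queues.
 * @idxd: idxd device being shut down or reset
 *
 * Drains outstanding kernel submissions on every enabled IDXD_WQT_KERNEL
 * work queue so the device can be disabled safely.
 */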
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->num_wq_irqs; i++) {
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
			rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
							    IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n",
					 idxd->int_handles[i]);
			else
				dev_dbg(dev, "int handle released: %u\n", idxd->int_handles[i]);
		}
	}
}

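/*
 * Shutdown order matters: disable the device first so no new descriptors
 * complete, then mask the MSI-X vectors and error interrupts, and finally
 * drain the per-vector pending/work lists, aborting anything still queued.
 */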
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	device_unregister(&idxd->conf_dev);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
	perfmon_exit();
}
module_exit(idxd_exit_module);