// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

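/*
 * Bus match: a device on the dsa bus only binds once it is ready for
 * configuration -- the idxd device must have reached CONF_READY, and a
 * wq must additionally be disabled before it can be (re)configured.
 */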
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

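/*
 * Bus probe: configure and enable either a whole idxd device or a single
 * wq, depending on which kind of device is being bound.
 */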
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* Start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared wq checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "PASID not enabled and shared WQ.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * A threshold of 0 on a shared wq means the user never
			 * set one (or switched over from a dedicated wq without
			 * setting it), which would effectively disable the wq.
			 * Refuse to enable until a non-zero threshold is set.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			int err = rc;

			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			/* report the mapping failure, not the disable result */
			return err;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

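/*
 * Tear down an enabled wq: unregister its dmaengine channel or cdev,
 * drain and reset the hardware queue, then release its resources.
 */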
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

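/*
 * Bus remove: disable a single wq, or disable the whole device after
 * forcing any still-active wqs off their driver first.
 */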
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else if (idxd->type == IDXD_TYPE_IAX)
		return &iax_device_type;
	else
		return NULL;
}

int idxd_register_driver(void)
{
	return driver_register(&dsa_drv.drv);
}

void idxd_unregister_driver(void)
{
	driver_unregister(&dsa_drv.drv);
}

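/* IDXD engine attributes */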
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

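/*
 * IDXD group attributes. Tokens are the device's shared resource credits;
 * max_tokens is split between per-group reservations (tokens_reserved)
 * and a free pool (nr_tokens).
 */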
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	/* Recompute the free pool: total tokens minus every group's reservation */
	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* Trim the trailing space, but only if anything was printed */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* Trim the trailing space, but only if anything was printed */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

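/* IDXD work queue attributes */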
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val == 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * A kernel wq cannot be named (and therefore enabled) while PASID
	 * is on, since the dmaengine path does not support SVM yet.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

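/*
 * Parse a u64 from sysfs input and round it up to the next power of two.
 * Used by the max_transfer_size and max_batch_size stores below.
 */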
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

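/* IDXD device attributes */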
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

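/*
 * Registration helpers. If device_add() fails partway through, the
 * already-added devices are unregistered and the remaining ones only
 * get their initialization reference dropped via put_device().
 */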
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		rc = device_add(&engine->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* put the failed and never-added devices, then unregister i-1..0 */
	j = i;
	for (; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->engines[j]->conf_dev);
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		rc = device_add(&group->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;
	for (; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->groups[j]->conf_dev);
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = device_add(&wq->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i;
	for (; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->wqs[j]->conf_dev);
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(&idxd->conf_dev);
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(&idxd->wqs[i]->conf_dev);
 err_wq:
	device_del(&idxd->conf_dev);
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}