/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
28
29static ssize_t cap_show(struct dma_chan *c, char *page)
30{
31 struct dma_device *dma = c->device;
32
33 return sprintf(page, "copy%s%s%s%s%s\n",
34 dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
35 dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
36 dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
37 dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
38 dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
39
40}
41struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
42
43static ssize_t version_show(struct dma_chan *c, char *page)
44{
45 struct dma_device *dma = c->device;
46 struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
47
48 return sprintf(page, "%d.%d\n",
49 ioat_dma->version >> 4, ioat_dma->version & 0xf);
50}
51struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
52
53static ssize_t
54ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
55{
56 struct ioat_sysfs_entry *entry;
57 struct ioatdma_chan *ioat_chan;
58
59 entry = container_of(attr, struct ioat_sysfs_entry, attr);
60 ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
61
62 if (!entry->show)
63 return -EIO;
64 return entry->show(&ioat_chan->dma_chan, page);
65}
66
/*
 * sysfs_ops for ioat channel kobjects.  Only ->show is provided, so every
 * attribute exposed through these ops is read-only.
 */
const struct sysfs_ops ioat_sysfs_ops = {
	.show = ioat_attr_show,
};
70
71void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
72{
73 struct dma_device *dma = &ioat_dma->dma_dev;
74 struct dma_chan *c;
75
76 list_for_each_entry(c, &dma->channels, device_node) {
77 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
78 struct kobject *parent = &c->dev->device.kobj;
79 int err;
80
81 err = kobject_init_and_add(&ioat_chan->kobj, type,
82 parent, "quickdata");
83 if (err) {
84 dev_warn(to_dev(ioat_chan),
85 "sysfs init error (%d), continuing...\n", err);
86 kobject_put(&ioat_chan->kobj);
87 set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
88 }
89 }
90}
91
92void ioat_kobject_del(struct ioatdma_device *ioat_dma)
93{
94 struct dma_device *dma = &ioat_dma->dma_dev;
95 struct dma_chan *c;
96
97 list_for_each_entry(c, &dma->channels, device_node) {
98 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
99
100 if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
101 kobject_del(&ioat_chan->kobj);
102 kobject_put(&ioat_chan->kobj);
103 }
104 }
105}
106
107static ssize_t ring_size_show(struct dma_chan *c, char *page)
108{
109 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
110
111 return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
112}
113static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
114
115static ssize_t ring_active_show(struct dma_chan *c, char *page)
116{
117 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
118
119
120 return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
121}
122static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
123
/* Default attributes attached to every channel's "quickdata" kobject. */
static struct attribute *ioat_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};
131
/*
 * kobj_type for the per-channel "quickdata" directories created by
 * ioat_kobject_add().  NOTE(review): recent kernels replaced ->default_attrs
 * with ->default_groups (ATTRIBUTE_GROUPS) — confirm against the target
 * kernel version before backporting changes here.
 */
struct kobj_type ioat_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat_attrs,
};
136