// SPDX-License-Identifier: GPL-2.0
/*
 * Error reporting support for the FPGA Device Feature List (DFL) AFU port.
 *
 * NOTE(review): the original file header comment was lost during extraction
 * (only bare line numbers remained). Reconstructed from the code below —
 * confirm license line and copyright holders against the upstream
 * drivers/fpga/dfl-afu-error.c before merging.
 */
17#include <linux/fpga-dfl.h>
18#include <linux/uaccess.h>
19
20#include "dfl-afu.h"
21
/* Register offsets within the port error feature's MMIO region */
#define PORT_ERROR_MASK 0x8		/* mask bits: 1 = error masked */
#define PORT_ERROR 0x10			/* latched port error bits (W1C) */
#define PORT_FIRST_ERROR 0x18		/* first error captured (W1C) */
#define PORT_MALFORMED_REQ0 0x20	/* first malformed request, low half */
#define PORT_MALFORMED_REQ1 0x28	/* first malformed request, high half */

/* All-ones value used to mask every error bit at once */
#define ERROR_MASK GENMASK_ULL(63, 0)
29
30
31static void __afu_port_err_mask(struct device *dev, bool mask)
32{
33 void __iomem *base;
34
35 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
36
37 writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
38}
39
40static void afu_port_err_mask(struct device *dev, bool mask)
41{
42 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
43
44 mutex_lock(&pdata->lock);
45 __afu_port_err_mask(dev, mask);
46 mutex_unlock(&pdata->lock);
47}
48
49
/*
 * Clear latched port errors, but only if the caller-supplied value @err
 * matches the current content of the PORT_ERROR register (write-1-to-clear
 * handshake, so no newly-arrived error is cleared unseen).
 *
 * Returns 0 on success, -EBUSY if the port is in AP6 power state or cannot
 * be disabled, -EINVAL on a value mismatch, or the port re-enable error.
 */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base_err, *base_hdr;
	int enable_ret = 0, ret = -EBUSY;
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);

	/*
	 * Clear port errors:
	 *
	 * - Check for AP6 power state (errors cannot be cleared there)
	 * - Halt the port by holding it in reset
	 * - Set the port error mask to all 1s to mask errors
	 * - Clear the latched errors (write-1-to-clear)
	 * - Set the mask back to all 0s to re-enable error capture
	 * - Re-enable the port by pulling it out of reset
	 */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* halt the port so no new errors arrive while clearing */
	ret = __afu_port_disable(pdev);
	if (ret)
		goto done;

	/* mask all errors during the clear sequence */
	__afu_port_err_mask(dev, true);

	/* only clear when the user-visible value still matches the hardware */
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		writeq(v, base_err + PORT_ERROR);

		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
			 __func__, v, err);
		ret = -EINVAL;
	}

	/* unmask so new errors are captured again */
	__afu_port_err_mask(dev, false);

	/* bring the port back up; its failure takes precedence below */
	enable_ret = __afu_port_enable(pdev);

done:
	mutex_unlock(&pdata->lock);
	return enable_ret ? enable_ret : ret;
}
114
115static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
116 char *buf)
117{
118 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
119 void __iomem *base;
120 u64 error;
121
122 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
123
124 mutex_lock(&pdata->lock);
125 error = readq(base + PORT_ERROR);
126 mutex_unlock(&pdata->lock);
127
128 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
129}
130
131static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
132 const char *buff, size_t count)
133{
134 u64 value;
135 int ret;
136
137 if (kstrtou64(buff, 0, &value))
138 return -EINVAL;
139
140 ret = afu_port_err_clear(dev, value);
141
142 return ret ? ret : count;
143}
144static DEVICE_ATTR_RW(errors);
145
146static ssize_t first_error_show(struct device *dev,
147 struct device_attribute *attr, char *buf)
148{
149 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
150 void __iomem *base;
151 u64 error;
152
153 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
154
155 mutex_lock(&pdata->lock);
156 error = readq(base + PORT_FIRST_ERROR);
157 mutex_unlock(&pdata->lock);
158
159 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
160}
161static DEVICE_ATTR_RO(first_error);
162
163static ssize_t first_malformed_req_show(struct device *dev,
164 struct device_attribute *attr,
165 char *buf)
166{
167 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
168 void __iomem *base;
169 u64 req0, req1;
170
171 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
172
173 mutex_lock(&pdata->lock);
174 req0 = readq(base + PORT_MALFORMED_REQ0);
175 req1 = readq(base + PORT_MALFORMED_REQ1);
176 mutex_unlock(&pdata->lock);
177
178 return sprintf(buf, "0x%016llx%016llx\n",
179 (unsigned long long)req1, (unsigned long long)req0);
180}
181static DEVICE_ATTR_RO(first_malformed_req);
182
/* Attributes exposed under the "errors" sysfs group (see port_err_group). */
static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};
189
190static umode_t port_err_attrs_visible(struct kobject *kobj,
191 struct attribute *attr, int n)
192{
193 struct device *dev = kobj_to_dev(kobj);
194
195
196
197
198
199 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
200 return 0;
201
202 return attr->mode;
203}
204
/* sysfs group: <port device>/errors/{errors,first_error,first_malformed_req} */
const struct attribute_group port_err_group = {
	.name = "errors",
	.attrs = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};
210
211static int port_err_init(struct platform_device *pdev,
212 struct dfl_feature *feature)
213{
214 afu_port_err_mask(&pdev->dev, false);
215
216 return 0;
217}
218
219static void port_err_uinit(struct platform_device *pdev,
220 struct dfl_feature *feature)
221{
222 afu_port_err_mask(&pdev->dev, true);
223}
224
225static long
226port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
227 unsigned int cmd, unsigned long arg)
228{
229 switch (cmd) {
230 case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
231 return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
232 case DFL_FPGA_PORT_ERR_SET_IRQ:
233 return dfl_feature_ioctl_set_irq(pdev, feature, arg);
234 default:
235 dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
236 return -ENODEV;
237 }
238}
239
/* Feature IDs this sub-driver binds to; zero-terminated. */
const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};
244
/* Operations for the port error feature (registered by the AFU driver). */
const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
	.ioctl = port_err_ioctl,
};
250