1
2
3
4
5#include "ifpga_feature_dev.h"
6
7static u64
8pr_err_handle(struct feature_fme_pr *fme_pr)
9{
10 struct feature_fme_pr_status fme_pr_status;
11 unsigned long err_code;
12 u64 fme_pr_error;
13 int i;
14
15 fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
16 if (!fme_pr_status.pr_status)
17 return 0;
18
19 err_code = readq(&fme_pr->ccip_fme_pr_err);
20 fme_pr_error = err_code;
21
22 for (i = 0; i < PR_MAX_ERR_NUM; i++) {
23 if (err_code & (1 << i))
24 dev_info(NULL, "%s\n", pr_err_msg[i]);
25 }
26
27 writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
28 return fme_pr_error;
29}
30
/*
 * Prepare the PR engine for a partial reconfiguration.
 *
 * Resets the PR engine, waits for the reset acknowledge bit, releases
 * the reset, waits for the PR host-status field to report idle, then
 * reports (and clears) any error left over from a previous PR attempt.
 *
 * @fme_dev: FME hardware instance the PR feature belongs to.
 * @info:    PR request descriptor; only info->flags is consulted here.
 *
 * Returns 0 on success, -EINVAL if the PR feature is absent or the
 * request is not a partial-reconfig request, -ETIMEDOUT if the HW does
 * not respond within PR_WAIT_TIMEOUT.
 */
static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
		struct fpga_pr_info *info)
{
	struct feature_fme_pr *fme_pr;
	struct feature_fme_pr_ctl fme_pr_ctl;
	struct feature_fme_pr_status fme_pr_status;

	fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
			FME_FEATURE_ID_PR_MGMT);
	if (!fme_pr)
		return -EINVAL;

	/* only partial reconfiguration is supported by this path */
	if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
		return -EINVAL;

	dev_info(fme_dev, "resetting PR before initiated PR\n");

	/* assert PR reset (read-modify-write of the control register) */
	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
	fme_pr_ctl.pr_reset = 1;
	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);

	/* expected value the wait-macro polls the pr_reset_ack field for */
	fme_pr_ctl.pr_reset_ack = 1;

	if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
				     &fme_pr->ccip_fme_pr_control,
				     PR_WAIT_TIMEOUT, 1)) {
		dev_err(fme_dev, "maximum PR timeout\n");
		return -ETIMEDOUT;
	}

	/* reset acknowledged — deassert it */
	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
	fme_pr_ctl.pr_reset = 0;
	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);

	dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");

	/* expected value for the pr_host_status field poll below */
	fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;

	if (fpga_wait_register_field(pr_host_status, fme_pr_status,
				     &fme_pr->ccip_fme_pr_status,
				     PR_WAIT_TIMEOUT, 1)) {
		dev_err(fme_dev, "maximum PR timeout\n");
		return -ETIMEDOUT;
	}

	dev_info(fme_dev, "check if have any previous PR error\n");
	/* stale errors are only logged and cleared; they do not fail init */
	pr_err_handle(fme_pr);
	return 0;
}
80
/*
 * Stream the bitstream into the PR data register, one pr_bandwidth-sized
 * word at a time, pacing writes against the HW credit counter.
 *
 * @fme_dev: FME hardware instance.
 * @port_id: target PR region/port written into the control register.
 * @buf:     bitstream payload; caller guarantees count is a multiple of
 *           fme_dev->pr_bandwidth (fme_pr() aligns size up front).
 * @count:   number of payload bytes to push.
 * @info:    on error, info->pr_err receives the raw PR error bits.
 *
 * Returns 0 on success, -ETIMEDOUT/-EIO when credits never free up,
 * -EFAULT for an unsupported bandwidth, -EINVAL on a short tail.
 *
 * NOTE(review): only the 4-byte (32-bit) bandwidth is implemented here;
 * a device probed as 512-bit (pr_bandwidth == 64, see fme_pr_mgmt_init)
 * would fail with -EFAULT — confirm whether 512-bit push is handled
 * elsewhere.
 */
static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
		int port_id, const char *buf, size_t count,
		struct fpga_pr_info *info)
{
	struct feature_fme_pr *fme_pr;
	struct feature_fme_pr_ctl fme_pr_ctl;
	struct feature_fme_pr_status fme_pr_status;
	struct feature_fme_pr_data fme_pr_data;
	int delay, pr_credit;
	int ret = 0;

	fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
			FME_FEATURE_ID_PR_MGMT);
	if (!fme_pr)
		return -EINVAL;

	dev_info(fme_dev, "set PR port ID and start request\n");

	/* select the PR region and kick off the request */
	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
	fme_pr_ctl.pr_regionid = port_id;
	fme_pr_ctl.pr_start_req = 1;
	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);

	dev_info(fme_dev, "pushing data from bitstream to HW\n");

	/* initial credit snapshot; each data write consumes one credit */
	fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
	pr_credit = fme_pr_status.pr_credit;

	while (count > 0) {
		delay = 0;
		/* stall until HW grants credits; <= 1 presumably keeps one
		 * credit in reserve — confirm against the PR credit spec
		 */
		while (pr_credit <= 1) {
			if (delay++ > PR_WAIT_TIMEOUT) {
				dev_err(fme_dev, "maximum try\n");

				/* capture HW's error bits for the caller */
				info->pr_err = pr_err_handle(fme_pr);
				return info->pr_err ? -EIO : -ETIMEDOUT;
			}
			udelay(1);

			fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
			pr_credit = fme_pr_status.pr_credit;
		};

		if (count >= fme_dev->pr_bandwidth) {
			switch (fme_dev->pr_bandwidth) {
			case 4:
				/* 32-bit push: one dword per data write */
				fme_pr_data.rsvd = 0;
				fme_pr_data.pr_data_raw = *((const u32 *)buf);
				writeq(fme_pr_data.csr,
				       &fme_pr->ccip_fme_pr_data);
				break;
			default:
				/* bandwidth not supported by this loop */
				ret = -EFAULT;
				goto done;
			}

			buf += fme_dev->pr_bandwidth;
			count -= fme_dev->pr_bandwidth;
			pr_credit--;
		} else {
			/* caller violated the alignment contract */
			WARN_ON(1);
			ret = -EINVAL;
			goto done;
		}
	}

done:
	return ret;
}
150
151static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
152 struct fpga_pr_info *info)
153{
154 struct feature_fme_pr *fme_pr;
155 struct feature_fme_pr_ctl fme_pr_ctl;
156
157 fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
158 FME_FEATURE_ID_PR_MGMT);
159
160 fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
161 fme_pr_ctl.pr_push_complete = 1;
162 writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
163
164 dev_info(fme_dev, "green bitstream push complete\n");
165 dev_info(fme_dev, "waiting for HW to release PR resource\n");
166
167 fme_pr_ctl.pr_start_req = 0;
168
169 if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
170 &fme_pr->ccip_fme_pr_control,
171 PR_WAIT_TIMEOUT, 1)) {
172 printf("maximum try.\n");
173 return -ETIMEDOUT;
174 }
175
176 dev_info(fme_dev, "PR operation complete, checking status\n");
177 info->pr_err = pr_err_handle(fme_pr);
178 if (info->pr_err)
179 return -EIO;
180
181 dev_info(fme_dev, "PR done successfully\n");
182 return 0;
183}
184
185static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
186 struct fpga_pr_info *info, const char *buf,
187 size_t count)
188{
189 int ret;
190
191 info->state = FPGA_PR_STATE_WRITE_INIT;
192 ret = fme_pr_write_init(fme_dev, info);
193 if (ret) {
194 dev_err(fme_dev, "Error preparing FPGA for writing\n");
195 info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
196 return ret;
197 }
198
199
200
201
202 info->state = FPGA_PR_STATE_WRITE;
203 ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
204 if (ret) {
205 dev_err(fme_dev, "Error while writing image data to FPGA\n");
206 info->state = FPGA_PR_STATE_WRITE_ERR;
207 return ret;
208 }
209
210
211
212
213
214 info->state = FPGA_PR_STATE_WRITE_COMPLETE;
215 ret = fme_pr_write_complete(fme_dev, info);
216 if (ret) {
217 dev_err(fme_dev, "Error after writing image data to FPGA\n");
218 info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
219 return ret;
220 }
221 info->state = FPGA_PR_STATE_DONE;
222
223 return 0;
224}
225
/*
 * Perform a partial reconfiguration of one port's PR region.
 *
 * Validates the request against the FME capability register, quiesces
 * the target port for the duration of the PR, and runs the full PR
 * transaction under the FME lock.
 *
 * @hw:      device instance containing the FME and ports.
 * @port_id: PR region to reconfigure; checked against num_ports.
 * @buffer:  bitstream payload (header already stripped by do_pr).
 * @size:    payload size in bytes; rounded up to pr_bandwidth below.
 * @status:  out: raw PR error bits from HW (0 on clean PR).
 *
 * Returns 0 on success or a negative error code.
 */
static int fme_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
		u32 size, u64 *status)
{
	struct feature_fme_header *fme_hdr;
	struct feature_fme_capability fme_capability;
	struct ifpga_fme_hw *fme = &hw->fme;
	struct fpga_pr_info info;
	struct ifpga_port_hw *port;
	int ret = 0;

	if (!buffer || size == 0)
		return -EINVAL;
	/* the FME feature set must have been fully discovered */
	if (fme->state != IFPGA_FME_IMPLEMENTED)
		return -EINVAL;

	/* round size up to a whole number of PR data words.
	 * NOTE(review): when size is not already aligned, the push loop
	 * will read up to pr_bandwidth - 1 bytes past the caller's
	 * payload — presumably the buffer is padded; confirm in do_pr's
	 * callers.
	 */
	size = IFPGA_ALIGN(size, fme->pr_bandwidth);

	fme_hdr = get_fme_feature_ioaddr_by_index(fme,
			FME_FEATURE_ID_HEADER);
	if (!fme_hdr)
		return -EINVAL;

	/* reject port ids beyond what this FME exposes */
	fme_capability.csr = readq(&fme_hdr->capability);
	if (port_id >= fme_capability.num_ports) {
		dev_err(fme, "port number more than maximum\n");
		return -EINVAL;
	}

	opae_memset(&info, 0, sizeof(struct fpga_pr_info));
	info.flags = FPGA_MGR_PARTIAL_RECONFIG;
	info.port_id = port_id;

	spinlock_lock(&fme->lock);

	port = &hw->port[port_id];

	/* quiesce the port while its region is being reconfigured */
	fpga_port_disable(port);

	ret = fpga_pr_buf_load(fme, &info, buffer, size);

	/* expose HW's raw error bits regardless of ret */
	*status = info.pr_err;

	fpga_port_enable(port);
	spinlock_unlock(&fme->lock);

	return ret;
}
282
283int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer,
284 u32 size, u64 *status)
285{
286 const struct bts_header *bts_hdr;
287 const char *buf;
288 struct ifpga_port_hw *port;
289 int ret;
290 u32 header_size;
291
292 if (!buffer || size == 0) {
293 dev_err(hw, "invalid parameter\n");
294 return -EINVAL;
295 }
296
297 bts_hdr = (const struct bts_header *)buffer;
298
299 if (is_valid_bts(bts_hdr)) {
300 dev_info(hw, "this is a valid bitsteam..\n");
301 header_size = sizeof(struct bts_header) +
302 bts_hdr->metadata_len;
303 if (size < header_size)
304 return -EINVAL;
305 size -= header_size;
306 buf = buffer + header_size;
307 } else {
308 dev_err(hw, "this is an invalid bitstream..\n");
309 return -EINVAL;
310 }
311
312
313 port = &hw->port[port_id];
314 ret = port_clear_error(port);
315 if (ret) {
316 dev_err(hw, "port cannot clear error\n");
317 return -EINVAL;
318 }
319
320 return fme_pr(hw, port_id, buf, size, status);
321}
322
323static int fme_pr_mgmt_init(struct ifpga_feature *feature)
324{
325 struct feature_fme_pr *fme_pr;
326 struct feature_header fme_pr_header;
327 struct ifpga_fme_hw *fme;
328
329 dev_info(NULL, "FME PR MGMT Init.\n");
330
331 fme = (struct ifpga_fme_hw *)feature->parent;
332
333 fme_pr = (struct feature_fme_pr *)feature->addr;
334
335 fme_pr_header.csr = readq(&fme_pr->header);
336 if (fme_pr_header.revision == 2) {
337 dev_info(NULL, "using 512-bit PR\n");
338 fme->pr_bandwidth = 64;
339 } else {
340 dev_info(NULL, "using 32-bit PR\n");
341 fme->pr_bandwidth = 4;
342 }
343
344 return 0;
345}
346
347static void fme_pr_mgmt_uinit(struct ifpga_feature *feature)
348{
349 UNUSED(feature);
350
351 dev_info(NULL, "FME PR MGMT UInit.\n");
352}
353
/* feature-framework entry points for the FME PR management feature */
struct ifpga_feature_ops fme_pr_mgmt_ops = {
	.init = fme_pr_mgmt_init,
	.uinit = fme_pr_mgmt_uinit,
};
358