1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/cdev.h>
26#include <linux/device.h>
27#include <linux/fs.h>
28#include <linux/io.h>
29#include <linux/interrupt.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/platform_device.h>
35#include <linux/poll.h>
36#include <linux/slab.h>
37#include <linux/uaccess.h>
38
39#include <uapi/misc/xilinx_sdfec.h>
40
41#define DRIVER_NAME "xilinx_sdfec"
42#define DRIVER_VERSION "0.3"
43#define DRIVER_MAX_DEV BIT(MINORBITS)
44
45static struct class *xsdfec_class;
46static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
47static dev_t xsdfec_devt;
48
49
50#define XSDFEC_AXI_WR_PROTECT_ADDR (0x00000)
51#define XSDFEC_CODE_WR_PROTECT_ADDR (0x00004)
52#define XSDFEC_ACTIVE_ADDR (0x00008)
53#define XSDFEC_AXIS_WIDTH_ADDR (0x0000c)
54#define XSDFEC_AXIS_ENABLE_ADDR (0x00010)
55#define XSDFEC_AXIS_ENABLE_MASK (0x0003F)
56#define XSDFEC_FEC_CODE_ADDR (0x00014)
57#define XSDFEC_ORDER_ADDR (0x00018)
58
59
60#define XSDFEC_ISR_MASK (0x0003F)
61
62#define XSDFEC_ISR_ADDR (0x0001c)
63
64#define XSDFEC_IER_ADDR (0x00020)
65
66#define XSDFEC_IDR_ADDR (0x00024)
67
68#define XSDFEC_IMR_ADDR (0x00028)
69
70
71#define XSDFEC_ECC_ISR_SBE (0x7FF)
72
73#define XSDFEC_ECC_ISR_MBE (0x3FF800)
74
75#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE | XSDFEC_ECC_ISR_MBE)
76
77
78#define XSDFEC_ECC_MULTI_BIT_POS (11)
79#define XSDFEC_ERROR_MAX_THRESHOLD (100)
80
81
82#define XSDFEC_ECC_ISR_ADDR (0x0002c)
83
84#define XSDFEC_ECC_IER_ADDR (0x00030)
85
86#define XSDFEC_ECC_IDR_ADDR (0x00034)
87
88#define XSDFEC_ECC_IMR_ADDR (0x00038)
89
90#define XSDFEC_BYPASS_ADDR (0x0003c)
91#define XSDFEC_TEST_EMA_ADDR_BASE (0x00080)
92#define XSDFEC_TEST_EMA_ADDR_HIGH (0x00089)
93#define XSDFEC_TURBO_ADDR (0x00100)
94#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x02000)
95#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x021fc)
96#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x02004)
97#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x02200)
98#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x02008)
99#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x02204)
100#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x0200c)
101#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x02208)
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
/**
 * struct xsdfec_dev - Driver private data for one SD-FEC core instance
 * @regs: ioremapped base address of the core's register space
 * @dev: associated struct device, used for dev_err()/dev_dbg() logging
 * @state: current driver state machine value (init/started/stopped/...)
 * @config: cached core configuration reported back to user space
 * @intr_enabled: interrupt-enable bookkeeping flag (not used in this chunk;
 *                presumably set by the IRQ setup path — confirm in probe code)
 * @wr_protect: software mirror of HW write protection; while set,
 *              xsdfec_regwrite() refuses to perform register writes
 * @isr_err_count: ISR error counter, reported via XSDFEC_GET_STATS
 * @cecc_count: correctable ECC error counter, reported via XSDFEC_GET_STATS
 * @uecc_count: uncorrectable ECC error counter, reported via XSDFEC_GET_STATS
 * @open_count: single-open guard; decremented on open, incremented on release
 * @irq: IRQ line number
 * @xsdfec_cdev: character device backing the /dev node
 * @waitq: wait queue head (poll support presumably lives outside this chunk)
 */
struct xsdfec_dev {
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	struct xsdfec_config config;
	bool intr_enabled;
	bool wr_protect;
	atomic_t isr_err_count;
	atomic_t cecc_count;
	atomic_t uecc_count;
	atomic_t open_count;
	int irq;
	struct cdev xsdfec_cdev;
	wait_queue_head_t waitq;
};
136
137static inline void
138xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, u32 value)
139{
140 if (xsdfec->wr_protect) {
141 dev_err(xsdfec->dev, "SDFEC in write protect");
142 return;
143 }
144
145 dev_dbg(xsdfec->dev,
146 "Writing 0x%x to offset 0x%x", value, addr);
147 iowrite32(value, xsdfec->regs + addr);
148}
149
150static inline u32
151xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
152{
153 u32 rval;
154
155 rval = ioread32(xsdfec->regs + addr);
156 dev_dbg(xsdfec->dev,
157 "Read value = 0x%x from offset 0x%x",
158 rval, addr);
159 return rval;
160}
161
162#define XSDFEC_WRITE_PROTECT_ENABLE (1)
163#define XSDFEC_WRITE_PROTECT_DISABLE (0)
164static void
165xsdfec_wr_protect(struct xsdfec_dev *xsdfec, bool wr_pr)
166{
167 if (wr_pr) {
168 xsdfec_regwrite(xsdfec,
169 XSDFEC_CODE_WR_PROTECT_ADDR,
170 XSDFEC_WRITE_PROTECT_ENABLE);
171 xsdfec_regwrite(xsdfec,
172 XSDFEC_AXI_WR_PROTECT_ADDR,
173 XSDFEC_WRITE_PROTECT_ENABLE);
174
175 xsdfec->wr_protect = wr_pr;
176 } else {
177
178 xsdfec->wr_protect = wr_pr;
179 xsdfec_regwrite(xsdfec,
180 XSDFEC_AXI_WR_PROTECT_ADDR,
181 XSDFEC_WRITE_PROTECT_DISABLE);
182 xsdfec_regwrite(xsdfec,
183 XSDFEC_CODE_WR_PROTECT_ADDR,
184 XSDFEC_WRITE_PROTECT_DISABLE);
185 }
186}
187
188static int
189xsdfec_dev_open(struct inode *iptr, struct file *fptr)
190{
191 struct xsdfec_dev *xsdfec;
192
193 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
194 if (!xsdfec)
195 return -EAGAIN;
196
197
198 if (!atomic_dec_and_test(&xsdfec->open_count)) {
199 atomic_inc(&xsdfec->open_count);
200 return -EBUSY;
201 }
202
203 fptr->private_data = xsdfec;
204 return 0;
205}
206
207static int
208xsdfec_dev_release(struct inode *iptr, struct file *fptr)
209{
210 struct xsdfec_dev *xsdfec;
211
212 xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
213 if (!xsdfec)
214 return -EAGAIN;
215
216 atomic_inc(&xsdfec->open_count);
217 return 0;
218}
219
220#define XSDFEC_IS_ACTIVITY_SET (0x1)
221static int
222xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
223{
224 struct xsdfec_status status;
225 int err = 0;
226
227 status.fec_id = xsdfec->config.fec_id;
228 status.state = xsdfec->state;
229 status.activity =
230 (xsdfec_regread(xsdfec,
231 XSDFEC_ACTIVE_ADDR) &
232 XSDFEC_IS_ACTIVITY_SET);
233
234 err = copy_to_user(arg, &status, sizeof(status));
235 if (err) {
236 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
237 __func__, xsdfec->config.fec_id);
238 err = -EFAULT;
239 }
240 return err;
241}
242
243static int
244xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
245{
246 int err = 0;
247
248 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
249 if (err) {
250 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
251 __func__, xsdfec->config.fec_id);
252 err = -EFAULT;
253 }
254 return err;
255}
256
257static int
258xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
259{
260 u32 mask_read;
261
262 if (enable) {
263
264 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR,
265 XSDFEC_ISR_MASK);
266 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
267 if (mask_read & XSDFEC_ISR_MASK) {
268 dev_err(xsdfec->dev,
269 "SDFEC enabling irq with IER failed");
270 return -EIO;
271 }
272 } else {
273
274 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR,
275 XSDFEC_ISR_MASK);
276 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
277 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
278 dev_err(xsdfec->dev,
279 "SDFEC disabling irq with IDR failed");
280 return -EIO;
281 }
282 }
283 return 0;
284}
285
286static int
287xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
288{
289 u32 mask_read;
290
291 if (enable) {
292
293 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
294 XSDFEC_ECC_ISR_MASK);
295 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
296 if (mask_read & XSDFEC_ECC_ISR_MASK) {
297 dev_err(xsdfec->dev,
298 "SDFEC enabling ECC irq with ECC IER failed");
299 return -EIO;
300 }
301 } else {
302
303 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
304 XSDFEC_ECC_ISR_MASK);
305 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
306 if ((mask_read & XSDFEC_ECC_ISR_MASK) != XSDFEC_ECC_ISR_MASK) {
307 dev_err(xsdfec->dev,
308 "SDFEC disable ECC irq with ECC IDR failed");
309 return -EIO;
310 }
311 }
312 return 0;
313}
314
315static int
316xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
317{
318 struct xsdfec_irq irq;
319 int err = 0;
320
321 err = copy_from_user(&irq, arg, sizeof(irq));
322 if (err) {
323 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
324 __func__, xsdfec->config.fec_id);
325 return -EFAULT;
326 }
327
328
329 if (irq.enable_isr) {
330 err = xsdfec_isr_enable(xsdfec, true);
331 if (err < 0)
332 return err;
333 }
334
335
336 if (irq.enable_ecc_isr) {
337 err = xsdfec_ecc_isr_enable(xsdfec, true);
338 if (err < 0)
339 return err;
340 }
341
342 return 0;
343}
344
345#define XSDFEC_TURBO_SCALE_MASK (0xF)
346#define XSDFEC_TURBO_SCALE_BIT_POS (8)
347static int
348xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
349{
350 struct xsdfec_turbo turbo;
351 int err = 0;
352 u32 turbo_write = 0;
353
354 err = copy_from_user(&turbo, arg, sizeof(turbo));
355 if (err) {
356 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
357 __func__, xsdfec->config.fec_id);
358 return -EFAULT;
359 }
360
361
362 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
363 dev_err(xsdfec->dev,
364 "%s: Unable to write Turbo to SDFEC%d check DT",
365 __func__, xsdfec->config.fec_id);
366 return -EIO;
367 } else if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
368 xsdfec->config.code = XSDFEC_TURBO_CODE;
369 }
370
371 if (xsdfec->wr_protect)
372 xsdfec_wr_protect(xsdfec, false);
373
374 turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) <<
375 XSDFEC_TURBO_SCALE_BIT_POS) | turbo.alg;
376 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
377 return err;
378}
379
380static int
381xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
382{
383 u32 reg_value;
384 struct xsdfec_turbo turbo_params;
385 int err;
386
387 if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
388 dev_err(xsdfec->dev,
389 "%s: SDFEC%d is configured for LDPC, check DT",
390 __func__, xsdfec->config.fec_id);
391 return -EIO;
392 }
393
394 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
395
396 turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
397 XSDFEC_TURBO_SCALE_BIT_POS;
398 turbo_params.alg = reg_value & 0x1;
399
400 err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
401 if (err) {
402 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
403 __func__, xsdfec->config.fec_id);
404 err = -EFAULT;
405 }
406
407 return err;
408}
409
410#define XSDFEC_LDPC_REG_JUMP (0x10)
411#define XSDFEC_REG0_N_MASK (0x0000FFFF)
412#define XSDFEC_REG0_N_LSB (0)
413#define XSDFEC_REG0_K_MASK (0x7fff0000)
414#define XSDFEC_REG0_K_LSB (16)
415static int
416xsdfec_reg0_write(struct xsdfec_dev *xsdfec,
417 u32 n, u32 k, u32 offset)
418{
419 u32 wdata;
420
421
422 if (n & ~XSDFEC_REG0_N_MASK)
423 dev_err(xsdfec->dev, "N value is beyond 16 bits");
424 n &= XSDFEC_REG0_N_MASK;
425 n <<= XSDFEC_REG0_N_LSB;
426
427 if (k & XSDFEC_REG0_K_MASK)
428 dev_err(xsdfec->dev, "K value is beyond 16 bits");
429
430 k = ((k << XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK);
431 wdata = k | n;
432
433 if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
434 > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
435 dev_err(xsdfec->dev,
436 "Writing outside of LDPC reg0 space 0x%x",
437 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
438 (offset * XSDFEC_LDPC_REG_JUMP));
439 return -EINVAL;
440 }
441 xsdfec_regwrite(xsdfec,
442 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
443 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
444 return 0;
445}
446
447static int
448xsdfec_collect_ldpc_reg0(struct xsdfec_dev *xsdfec,
449 u32 code_id,
450 struct xsdfec_ldpc_params *ldpc_params)
451{
452 u32 reg_value;
453 u32 reg_addr = XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
454 (code_id * XSDFEC_LDPC_REG_JUMP);
455
456 if (reg_addr > XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
457 dev_err(xsdfec->dev,
458 "Accessing outside of LDPC reg0 for code_id %d",
459 code_id);
460 return -EINVAL;
461 }
462
463 reg_value = xsdfec_regread(xsdfec, reg_addr);
464
465 ldpc_params->n = (reg_value >> XSDFEC_REG0_N_LSB) & XSDFEC_REG0_N_MASK;
466
467 ldpc_params->k = (reg_value >> XSDFEC_REG0_K_LSB) & XSDFEC_REG0_K_MASK;
468
469 return 0;
470}
471
472#define XSDFEC_REG1_PSIZE_MASK (0x000001ff)
473#define XSDFEC_REG1_NO_PACKING_MASK (0x00000400)
474#define XSDFEC_REG1_NO_PACKING_LSB (10)
475#define XSDFEC_REG1_NM_MASK (0x000ff800)
476#define XSDFEC_REG1_NM_LSB (11)
477#define XSDFEC_REG1_BYPASS_MASK (0x00100000)
478static int
479xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
480 u32 no_packing, u32 nm, u32 offset)
481{
482 u32 wdata;
483
484 if (psize & ~XSDFEC_REG1_PSIZE_MASK)
485 dev_err(xsdfec->dev, "Psize is beyond 10 bits");
486 psize &= XSDFEC_REG1_PSIZE_MASK;
487
488 if (no_packing != 0 && no_packing != 1)
489 dev_err(xsdfec->dev, "No-packing bit register invalid");
490 no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
491 XSDFEC_REG1_NO_PACKING_MASK);
492
493 if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
494 dev_err(xsdfec->dev, "NM is beyond 10 bits");
495 nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
496
497 wdata = nm | no_packing | psize;
498 if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
499 > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
500 dev_err(xsdfec->dev,
501 "Writing outside of LDPC reg1 space 0x%x",
502 XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
503 (offset * XSDFEC_LDPC_REG_JUMP));
504 return -EINVAL;
505 }
506 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
507 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
508 return 0;
509}
510
511static int
512xsdfec_collect_ldpc_reg1(struct xsdfec_dev *xsdfec,
513 u32 code_id,
514 struct xsdfec_ldpc_params *ldpc_params)
515{
516 u32 reg_value;
517 u32 reg_addr = XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
518 (code_id * XSDFEC_LDPC_REG_JUMP);
519
520 if (reg_addr > XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
521 dev_err(xsdfec->dev,
522 "Accessing outside of LDPC reg1 for code_id %d",
523 code_id);
524 return -EINVAL;
525 }
526
527 reg_value = xsdfec_regread(xsdfec, reg_addr);
528
529 ldpc_params->psize = reg_value & XSDFEC_REG1_PSIZE_MASK;
530
531 ldpc_params->no_packing = ((reg_value >> XSDFEC_REG1_NO_PACKING_LSB) &
532 XSDFEC_REG1_NO_PACKING_MASK);
533
534 ldpc_params->nm = (reg_value >> XSDFEC_REG1_NM_LSB) &
535 XSDFEC_REG1_NM_MASK;
536 return 0;
537}
538
539#define XSDFEC_REG2_NLAYERS_MASK (0x000001FF)
540#define XSDFEC_REG2_NLAYERS_LSB (0)
541#define XSDFEC_REG2_NNMQC_MASK (0x000FFE00)
542#define XSDFEC_REG2_NMQC_LSB (9)
543#define XSDFEC_REG2_NORM_TYPE_MASK (0x00100000)
544#define XSDFEC_REG2_NORM_TYPE_LSB (20)
545#define XSDFEC_REG2_SPECIAL_QC_MASK (0x00200000)
546#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
547#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x00400000)
548#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
549#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x01800000)
550#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
551
552static int
553xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
554 u32 norm_type, u32 special_qc, u32 no_final_parity,
555 u32 max_schedule, u32 offset)
556{
557 u32 wdata;
558
559 if (nlayers & ~(XSDFEC_REG2_NLAYERS_MASK >>
560 XSDFEC_REG2_NLAYERS_LSB))
561 dev_err(xsdfec->dev, "Nlayers exceeds 9 bits");
562 nlayers &= XSDFEC_REG2_NLAYERS_MASK;
563
564 if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
565 dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
566 nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
567
568 if (norm_type > 1)
569 dev_err(xsdfec->dev, "Norm type is invalid");
570 norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
571 XSDFEC_REG2_NORM_TYPE_MASK);
572 if (special_qc > 1)
573 dev_err(xsdfec->dev, "Special QC in invalid");
574 special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
575 XSDFEC_REG2_SPECIAL_QC_MASK);
576
577 if (no_final_parity > 1)
578 dev_err(xsdfec->dev, "No final parity check invalid");
579 no_final_parity =
580 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
581 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
582 if (max_schedule & ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >>
583 XSDFEC_REG2_MAX_SCHEDULE_LSB))
584 dev_err(xsdfec->dev, "Max Schdule exceeds 2 bits");
585 max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
586 XSDFEC_REG2_MAX_SCHEDULE_MASK);
587
588 wdata = (max_schedule | no_final_parity | special_qc | norm_type |
589 nmqc | nlayers);
590
591 if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP)
592 > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
593 dev_err(xsdfec->dev,
594 "Writing outside of LDPC reg2 space 0x%x",
595 XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
596 (offset * XSDFEC_LDPC_REG_JUMP));
597 return -EINVAL;
598 }
599 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
600 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
601 return 0;
602}
603
604static int
605xsdfec_collect_ldpc_reg2(struct xsdfec_dev *xsdfec,
606 u32 code_id,
607 struct xsdfec_ldpc_params *ldpc_params)
608{
609 u32 reg_value;
610 u32 reg_addr = XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
611 (code_id * XSDFEC_LDPC_REG_JUMP);
612
613 if (reg_addr > XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
614 dev_err(xsdfec->dev,
615 "Accessing outside of LDPC reg2 for code_id %d",
616 code_id);
617 return -EINVAL;
618 }
619
620 reg_value = xsdfec_regread(xsdfec, reg_addr);
621
622 ldpc_params->nlayers = ((reg_value >> XSDFEC_REG2_NLAYERS_LSB) &
623 XSDFEC_REG2_NLAYERS_MASK);
624
625 ldpc_params->nmqc = (reg_value >> XSDFEC_REG2_NMQC_LSB) &
626 XSDFEC_REG2_NNMQC_MASK;
627
628 ldpc_params->norm_type = ((reg_value >> XSDFEC_REG2_NORM_TYPE_LSB) &
629 XSDFEC_REG2_NORM_TYPE_MASK);
630
631 ldpc_params->special_qc = ((reg_value >> XSDFEC_REG2_SPEICAL_QC_LSB) &
632 XSDFEC_REG2_SPECIAL_QC_MASK);
633
634 ldpc_params->no_final_parity =
635 ((reg_value >> XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
636 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
637
638 ldpc_params->max_schedule =
639 ((reg_value >> XSDFEC_REG2_MAX_SCHEDULE_LSB) &
640 XSDFEC_REG2_MAX_SCHEDULE_MASK);
641
642 return 0;
643}
644
645#define XSDFEC_REG3_LA_OFF_LSB (8)
646#define XSDFEC_REG3_QC_OFF_LSB (16)
647static int
648xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off,
649 u8 la_off, u16 qc_off, u32 offset)
650{
651 u32 wdata;
652
653 wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
654 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
655 if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
656 (offset * XSDFEC_LDPC_REG_JUMP) >
657 XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
658 dev_err(xsdfec->dev,
659 "Writing outside of LDPC reg3 space 0x%x",
660 XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
661 (offset * XSDFEC_LDPC_REG_JUMP));
662 return -EINVAL;
663 }
664 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
665 (offset * XSDFEC_LDPC_REG_JUMP), wdata);
666 return 0;
667}
668
669static int
670xsdfec_collect_ldpc_reg3(struct xsdfec_dev *xsdfec,
671 u32 code_id,
672 struct xsdfec_ldpc_params *ldpc_params)
673{
674 u32 reg_value;
675 u32 reg_addr = XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
676 (code_id * XSDFEC_LDPC_REG_JUMP);
677
678 if (reg_addr > XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
679 dev_err(xsdfec->dev,
680 "Accessing outside of LDPC reg3 for code_id %d",
681 code_id);
682 return -EINVAL;
683 }
684
685 reg_value = xsdfec_regread(xsdfec, reg_addr);
686
687 ldpc_params->qc_off = (reg_addr >> XSDFEC_REG3_QC_OFF_LSB) & 0xFF;
688 ldpc_params->la_off = (reg_addr >> XSDFEC_REG3_LA_OFF_LSB) & 0xFF;
689 ldpc_params->sc_off = (reg_addr & 0xFF);
690
691 return 0;
692}
693
694#define XSDFEC_SC_TABLE_DEPTH (0x3fc)
695#define XSDFEC_REG_WIDTH_JUMP (4)
696static int
697xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
698 u32 *sc_ptr, u32 len)
699{
700 int reg;
701
702
703
704
705
706 if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
707 dev_err(xsdfec->dev, "Write exceeds SC table length");
708 return -EINVAL;
709 }
710
711 for (reg = 0; reg < len; reg++) {
712 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
713 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, sc_ptr[reg]);
714 }
715 return reg;
716}
717
718static int
719xsdfec_collect_sc_table(struct xsdfec_dev *xsdfec, u32 offset,
720 u32 *sc_ptr, u32 len)
721{
722 u32 reg;
723 u32 reg_addr;
724 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
725
726 if (deepest_reach > XSDFEC_SC_TABLE_DEPTH) {
727 dev_err(xsdfec->dev, "Access will exceed SC table length");
728 return -EINVAL;
729 }
730
731 for (reg = 0; reg < len; reg++) {
732 reg_addr = XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
733 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
734
735 sc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
736 }
737
738 return 0;
739}
740
741#define XSDFEC_LA_TABLE_DEPTH (0xFFC)
742static int
743xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
744 u32 *la_ptr, u32 len)
745{
746 int reg;
747
748 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
749 dev_err(xsdfec->dev, "Write exceeds LA table length");
750 return -EINVAL;
751 }
752
753 for (reg = 0; reg < len; reg++) {
754 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
755 (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
756 la_ptr[reg]);
757 }
758 return reg;
759}
760
761static int
762xsdfec_collect_la_table(struct xsdfec_dev *xsdfec, u32 offset,
763 u32 *la_ptr, u32 len)
764{
765 u32 reg;
766 u32 reg_addr;
767 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
768
769 if (deepest_reach > XSDFEC_LA_TABLE_DEPTH) {
770 dev_err(xsdfec->dev, "Access will exceed LA table length");
771 return -EINVAL;
772 }
773
774 for (reg = 0; reg < len; reg++) {
775 reg_addr = XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
776 ((offset + reg) * XSDFEC_REG_WIDTH_JUMP);
777
778 la_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
779 }
780
781 return 0;
782}
783
784#define XSDFEC_QC_TABLE_DEPTH (0x7FFC)
785static int
786xsdfec_qc_table_write(struct xsdfec_dev *xsdfec,
787 u32 offset, u32 *qc_ptr, u32 len)
788{
789 int reg;
790
791 if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
792 dev_err(xsdfec->dev, "Write exceeds QC table length");
793 return -EINVAL;
794 }
795
796 for (reg = 0; reg < len; reg++) {
797 xsdfec_regwrite(xsdfec, XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
798 (offset + reg) * XSDFEC_REG_WIDTH_JUMP, qc_ptr[reg]);
799 }
800
801 return reg;
802}
803
804static int
805xsdfec_collect_qc_table(struct xsdfec_dev *xsdfec,
806 u32 offset, u32 *qc_ptr, u32 len)
807{
808 u32 reg;
809 u32 reg_addr;
810 u32 deepest_reach = (XSDFEC_REG_WIDTH_JUMP * (offset + len));
811
812 if (deepest_reach > XSDFEC_QC_TABLE_DEPTH) {
813 dev_err(xsdfec->dev, "Access will exceed QC table length");
814 return -EINVAL;
815 }
816
817 for (reg = 0; reg < len; reg++) {
818 reg_addr = XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
819 (offset + reg) * XSDFEC_REG_WIDTH_JUMP;
820
821 qc_ptr[reg] = xsdfec_regread(xsdfec, reg_addr);
822 }
823
824 return 0;
825}
826
827static int
828xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
829{
830 struct xsdfec_ldpc_params *ldpc;
831 int err;
832
833 ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
834 if (!ldpc)
835 return -ENOMEM;
836
837 err = copy_from_user(ldpc, arg, sizeof(*ldpc));
838 if (err) {
839 dev_err(xsdfec->dev,
840 "%s failed to copy from user for SDFEC%d",
841 __func__, xsdfec->config.fec_id);
842 goto err_out;
843 }
844 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
845 dev_err(xsdfec->dev,
846 "%s: Unable to write LDPC to SDFEC%d check DT",
847 __func__, xsdfec->config.fec_id);
848 goto err_out;
849 }
850
851 if (xsdfec->wr_protect)
852 xsdfec_wr_protect(xsdfec, false);
853
854
855 err = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->code_id);
856 if (err)
857 goto err_out;
858
859
860 err = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing,
861 ldpc->nm, ldpc->code_id);
862 if (err)
863 goto err_out;
864
865
866 err = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
867 ldpc->norm_type, ldpc->special_qc,
868 ldpc->no_final_parity, ldpc->max_schedule,
869 ldpc->code_id);
870 if (err)
871 goto err_out;
872
873
874 err = xsdfec_reg3_write(xsdfec, ldpc->sc_off,
875 ldpc->la_off, ldpc->qc_off, ldpc->code_id);
876 if (err)
877 goto err_out;
878
879
880 err = xsdfec_sc_table_write(xsdfec, ldpc->sc_off,
881 ldpc->sc_table, ldpc->nlayers);
882 if (err < 0)
883 goto err_out;
884
885 err = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off,
886 ldpc->la_table, ldpc->nlayers);
887 if (err < 0)
888 goto err_out;
889
890 err = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off,
891 ldpc->qc_table, ldpc->nqc);
892 if (err < 0)
893 goto err_out;
894
895 kfree(ldpc);
896 return 0;
897
898err_out:
899 kfree(ldpc);
900 return err;
901}
902
903static int
904xsdfec_get_ldpc_code_params(struct xsdfec_dev *xsdfec, void __user *arg)
905{
906 struct xsdfec_ldpc_params *ldpc_params;
907 int err = 0;
908
909 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
910 dev_err(xsdfec->dev,
911 "%s: SDFEC%d is configured for TURBO, check DT",
912 __func__, xsdfec->config.fec_id);
913 return -EIO;
914 }
915
916 ldpc_params = kzalloc(sizeof(*ldpc_params), GFP_KERNEL);
917 if (!ldpc_params)
918 return -ENOMEM;
919
920 err = copy_from_user(ldpc_params, arg, sizeof(*ldpc_params));
921 if (err) {
922 dev_err(xsdfec->dev,
923 "%s failed to copy from user for SDFEC%d",
924 __func__, xsdfec->config.fec_id);
925 goto err_out;
926 }
927
928 err = xsdfec_collect_ldpc_reg0(xsdfec, ldpc_params->code_id,
929 ldpc_params);
930 if (err)
931 goto err_out;
932
933 err = xsdfec_collect_ldpc_reg1(xsdfec, ldpc_params->code_id,
934 ldpc_params);
935 if (err)
936 goto err_out;
937
938 err = xsdfec_collect_ldpc_reg2(xsdfec, ldpc_params->code_id,
939 ldpc_params);
940 if (err)
941 goto err_out;
942
943 err = xsdfec_collect_ldpc_reg3(xsdfec, ldpc_params->code_id,
944 ldpc_params);
945 if (err)
946 goto err_out;
947
948
949
950
951
952 err = xsdfec_collect_sc_table(xsdfec, ldpc_params->sc_off,
953 ldpc_params->sc_table,
954 ldpc_params->nlayers);
955 if (err < 0)
956 goto err_out;
957
958 err = xsdfec_collect_la_table(xsdfec, 4 * ldpc_params->la_off,
959 ldpc_params->la_table,
960 ldpc_params->nlayers);
961 if (err < 0)
962 goto err_out;
963
964 err = xsdfec_collect_qc_table(xsdfec, 4 * ldpc_params->qc_off,
965 ldpc_params->qc_table,
966 ldpc_params->nqc);
967 if (err < 0)
968 goto err_out;
969
970 err = copy_to_user(arg, ldpc_params, sizeof(*ldpc_params));
971 if (err) {
972 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
973 __func__, xsdfec->config.fec_id);
974 err = -EFAULT;
975 }
976
977 kfree(ldpc_params);
978 return 0;
979
980err_out:
981 kfree(ldpc_params);
982 return err;
983}
984
985static int
986xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
987{
988 bool order_out_of_range;
989 enum xsdfec_order order = *((enum xsdfec_order *)arg);
990
991 order_out_of_range = (order <= XSDFEC_INVALID_ORDER) ||
992 (order >= XSDFEC_ORDER_MAX);
993 if (order_out_of_range) {
994 dev_err(xsdfec->dev,
995 "%s invalid order value %d for SDFEC%d",
996 __func__, order, xsdfec->config.fec_id);
997 return -EINVAL;
998 }
999
1000
1001 if (xsdfec->state == XSDFEC_STARTED) {
1002 dev_err(xsdfec->dev,
1003 "%s attempting to set Order while started for SDFEC%d",
1004 __func__, xsdfec->config.fec_id);
1005 return -EIO;
1006 }
1007
1008 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, (order - 1));
1009
1010 xsdfec->config.order = order;
1011
1012 return 0;
1013}
1014
1015static int
1016xsdfec_set_bypass(struct xsdfec_dev *xsdfec, void __user *arg)
1017{
1018 unsigned long bypass = *((unsigned long *)arg);
1019
1020 if (bypass > 1) {
1021 dev_err(xsdfec->dev,
1022 "%s invalid bypass value %ld for SDFEC%d",
1023 __func__, bypass, xsdfec->config.fec_id);
1024 return -EINVAL;
1025 }
1026
1027
1028 if (xsdfec->state == XSDFEC_STARTED) {
1029 dev_err(xsdfec->dev,
1030 "%s attempting to set bypass while started for SDFEC%d",
1031 __func__, xsdfec->config.fec_id);
1032 return -EIO;
1033 }
1034
1035 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, bypass);
1036
1037 return 0;
1038}
1039
1040static int
1041xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
1042{
1043 u32 reg_value;
1044
1045 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
1046
1047 *is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
1048
1049 return 0;
1050}
1051
1052static u32
1053xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
1054{
1055 u32 axis_width_field = 0;
1056
1057 switch (axis_width_cfg) {
1058 case XSDFEC_1x128b:
1059 axis_width_field = 0;
1060 break;
1061 case XSDFEC_2x128b:
1062 axis_width_field = 1;
1063 break;
1064 case XSDFEC_4x128b:
1065 axis_width_field = 2;
1066 break;
1067 }
1068
1069 return axis_width_field;
1070}
1071
1072static u32
1073xsdfec_translate_axis_words_cfg_val(
1074 enum xsdfec_axis_word_include axis_word_inc_cfg)
1075{
1076 u32 axis_words_field = 0;
1077
1078 if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
1079 axis_word_inc_cfg == XSDFEC_IN_BLOCK)
1080 axis_words_field = 0;
1081 else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
1082 axis_words_field = 1;
1083
1084 return axis_words_field;
1085}
1086
1087#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
1088#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
1089#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
1090#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
1091static int
1092xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
1093{
1094 u32 reg_value;
1095 u32 dout_words_field;
1096 u32 dout_width_field;
1097 u32 din_words_field;
1098 u32 din_width_field;
1099 struct xsdfec_config *config = &xsdfec->config;
1100
1101
1102 dout_words_field =
1103 xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
1104 dout_width_field =
1105 xsdfec_translate_axis_width_cfg_val(config->dout_width);
1106 din_words_field =
1107 xsdfec_translate_axis_words_cfg_val(config->din_word_include);
1108 din_width_field =
1109 xsdfec_translate_axis_width_cfg_val(config->din_width);
1110
1111 reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
1112 reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
1113 reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
1114 reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
1115
1116 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
1117
1118 return 0;
1119}
1120
1121static int xsdfec_start(struct xsdfec_dev *xsdfec)
1122{
1123 u32 regread;
1124
1125
1126 if (xsdfec->config.code == XSDFEC_CODE_INVALID) {
1127 dev_err(xsdfec->dev,
1128 "%s : set code before start for SDFEC%d",
1129 __func__, xsdfec->config.fec_id);
1130 return -EINVAL;
1131 }
1132 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
1133 regread &= 0x1;
1134 if (regread != (xsdfec->config.code - 1)) {
1135 dev_err(xsdfec->dev,
1136 "%s SDFEC HW code does not match driver code, reg %d, code %d",
1137 __func__, regread, (xsdfec->config.code - 1));
1138 return -EINVAL;
1139 }
1140
1141
1142 if (xsdfec->config.order == XSDFEC_INVALID_ORDER) {
1143 dev_err(xsdfec->dev,
1144 "%s : set order before starting SDFEC%d",
1145 __func__, xsdfec->config.fec_id);
1146 return -EINVAL;
1147 }
1148
1149
1150 xsdfec_regwrite(xsdfec,
1151 XSDFEC_AXIS_ENABLE_ADDR,
1152 XSDFEC_AXIS_ENABLE_MASK);
1153
1154 xsdfec_wr_protect(xsdfec, true);
1155
1156 xsdfec->state = XSDFEC_STARTED;
1157 return 0;
1158}
1159
1160static int
1161xsdfec_stop(struct xsdfec_dev *xsdfec)
1162{
1163 u32 regread;
1164
1165 if (xsdfec->state != XSDFEC_STARTED)
1166 dev_err(xsdfec->dev, "Device not started correctly");
1167
1168 xsdfec_wr_protect(xsdfec, false);
1169
1170 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
1171 regread &= (~XSDFEC_AXIS_ENABLE_MASK);
1172 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
1173
1174 xsdfec->state = XSDFEC_STOPPED;
1175 return 0;
1176}
1177
1178static int
1179xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
1180{
1181 atomic_set(&xsdfec->isr_err_count, 0);
1182 atomic_set(&xsdfec->uecc_count, 0);
1183 atomic_set(&xsdfec->cecc_count, 0);
1184
1185 return 0;
1186}
1187
1188static int
1189xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
1190{
1191 int err = 0;
1192 struct xsdfec_stats user_stats;
1193
1194 user_stats.isr_err_count = atomic_read(&xsdfec->isr_err_count);
1195 user_stats.cecc_count = atomic_read(&xsdfec->cecc_count);
1196 user_stats.uecc_count = atomic_read(&xsdfec->uecc_count);
1197
1198 err = copy_to_user(arg, &user_stats, sizeof(user_stats));
1199 if (err) {
1200 dev_err(xsdfec->dev, "%s failed for SDFEC%d",
1201 __func__, xsdfec->config.fec_id);
1202 err = -EFAULT;
1203 }
1204
1205 return err;
1206}
1207
1208static int
1209xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
1210{
1211 xsdfec->state = XSDFEC_INIT;
1212 xsdfec->config.order = XSDFEC_INVALID_ORDER;
1213 xsdfec->wr_protect = false;
1214
1215 xsdfec_wr_protect(xsdfec, false);
1216
1217 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code - 1);
1218 xsdfec_cfg_axi_streams(xsdfec);
1219
1220 return 0;
1221}
1222
1223static long
1224xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data)
1225{
1226 struct xsdfec_dev *xsdfec = fptr->private_data;
1227 void __user *arg = NULL;
1228 int rval = -EINVAL;
1229 int err = 0;
1230
1231 if (!xsdfec)
1232 return rval;
1233
1234
1235 if (xsdfec->state == XSDFEC_NEEDS_RESET &&
1236 (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
1237 cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
1238 dev_err(xsdfec->dev,
1239 "SDFEC%d in failed state. Reset Required",
1240 xsdfec->config.fec_id);
1241 return -EPERM;
1242 }
1243
1244 if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) {
1245 dev_err(xsdfec->dev, "Not a xilinx sdfec ioctl");
1246 return -ENOTTY;
1247 }
1248
1249
1250 if (_IOC_DIR(cmd) != _IOC_NONE) {
1251 arg = (void __user *)data;
1252 if (!arg) {
1253 dev_err(xsdfec->dev, "xilinx sdfec ioctl argument is NULL Pointer");
1254 return rval;
1255 }
1256 }
1257
1258
1259 if (_IOC_DIR(cmd) & _IOC_READ)
1260 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
1261 else if (_IOC_DIR(cmd) & _IOC_WRITE)
1262 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
1263
1264 if (err) {
1265 dev_err(xsdfec->dev, "Invalid xilinx sdfec ioctl argument");
1266 return -EFAULT;
1267 }
1268
1269 switch (cmd) {
1270 case XSDFEC_START_DEV:
1271 rval = xsdfec_start(xsdfec);
1272 break;
1273 case XSDFEC_STOP_DEV:
1274 rval = xsdfec_stop(xsdfec);
1275 break;
1276 case XSDFEC_CLEAR_STATS:
1277 rval = xsdfec_clear_stats(xsdfec);
1278 break;
1279 case XSDFEC_GET_STATS:
1280 rval = xsdfec_get_stats(xsdfec, arg);
1281 break;
1282 case XSDFEC_GET_STATUS:
1283 rval = xsdfec_get_status(xsdfec, arg);
1284 break;
1285 case XSDFEC_GET_CONFIG:
1286 rval = xsdfec_get_config(xsdfec, arg);
1287 break;
1288 case XSDFEC_SET_DEFAULT_CONFIG:
1289 rval = xsdfec_set_default_config(xsdfec);
1290 break;
1291 case XSDFEC_SET_IRQ:
1292 rval = xsdfec_set_irq(xsdfec, arg);
1293 break;
1294 case XSDFEC_SET_TURBO:
1295 rval = xsdfec_set_turbo(xsdfec, arg);
1296 break;
1297 case XSDFEC_GET_TURBO:
1298 rval = xsdfec_get_turbo(xsdfec, arg);
1299 break;
1300 case XSDFEC_ADD_LDPC_CODE_PARAMS:
1301 rval = xsdfec_add_ldpc(xsdfec, arg);
1302 break;
1303 case XSDFEC_GET_LDPC_CODE_PARAMS:
1304 rval = xsdfec_get_ldpc_code_params(xsdfec, arg);
1305 break;
1306 case XSDFEC_SET_ORDER:
1307 rval = xsdfec_set_order(xsdfec, arg);
1308 break;
1309 case XSDFEC_SET_BYPASS:
1310 rval = xsdfec_set_bypass(xsdfec, arg);
1311 break;
1312 case XSDFEC_IS_ACTIVE:
1313 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1314 break;
1315 default:
1316
1317 dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
1318 break;
1319 }
1320 return rval;
1321}
1322
1323static unsigned int
1324xsdfec_poll(struct file *file, poll_table *wait)
1325{
1326 unsigned int mask;
1327 struct xsdfec_dev *xsdfec = file->private_data;
1328
1329 if (!xsdfec)
1330 return POLLNVAL | POLLHUP;
1331
1332 poll_wait(file, &xsdfec->waitq, wait);
1333
1334
1335 if (xsdfec->state == XSDFEC_NEEDS_RESET)
1336 mask = POLLIN | POLLRDNORM;
1337 else
1338 mask = POLLPRI | POLLERR;
1339
1340 return mask;
1341}
1342
/* Character-device operations exposed for each /dev/xsdfecN node. */
static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
};
1350
1351static int
1352xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1353{
1354 struct device *dev = xsdfec->dev;
1355 struct device_node *node = dev->of_node;
1356 int rval;
1357 const char *fec_code;
1358 u32 din_width;
1359 u32 din_word_include;
1360 u32 dout_width;
1361 u32 dout_word_include;
1362
1363 rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1364 if (rval < 0) {
1365 dev_err(dev, "xlnx,sdfec-code not in DT");
1366 return rval;
1367 }
1368
1369 if (!strcasecmp(fec_code, "ldpc")) {
1370 xsdfec->config.code = XSDFEC_LDPC_CODE;
1371 } else if (!strcasecmp(fec_code, "turbo")) {
1372 xsdfec->config.code = XSDFEC_TURBO_CODE;
1373 } else {
1374 dev_err(xsdfec->dev, "Invalid Code in DT");
1375 return -EINVAL;
1376 }
1377
1378 rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
1379 &din_word_include);
1380 if (rval < 0) {
1381 dev_err(dev, "xlnx,sdfec-din-words not in DT");
1382 return rval;
1383 }
1384
1385 if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
1386 xsdfec->config.din_word_include = din_word_include;
1387 } else {
1388 dev_err(xsdfec->dev, "Invalid DIN Words in DT");
1389 return -EINVAL;
1390 }
1391
1392 rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
1393 if (rval < 0) {
1394 dev_err(dev, "xlnx,sdfec-din-width not in DT");
1395 return rval;
1396 }
1397
1398 switch (din_width) {
1399
1400 case XSDFEC_1x128b:
1401 case XSDFEC_2x128b:
1402 case XSDFEC_4x128b:
1403 xsdfec->config.din_width = din_width;
1404 break;
1405 default:
1406 dev_err(xsdfec->dev, "Invalid DIN Width in DT");
1407 return -EINVAL;
1408 }
1409
1410 rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
1411 &dout_word_include);
1412 if (rval < 0) {
1413 dev_err(dev, "xlnx,sdfec-dout-words not in DT");
1414 return rval;
1415 }
1416
1417 if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
1418 xsdfec->config.dout_word_include = dout_word_include;
1419 } else {
1420 dev_err(xsdfec->dev, "Invalid DOUT Words in DT");
1421 return -EINVAL;
1422 }
1423
1424 rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
1425 if (rval < 0) {
1426 dev_err(dev, "xlnx,sdfec-dout-width not in DT");
1427 return rval;
1428 }
1429
1430 switch (dout_width) {
1431
1432 case XSDFEC_1x128b:
1433 case XSDFEC_2x128b:
1434 case XSDFEC_4x128b:
1435 xsdfec->config.dout_width = dout_width;
1436 break;
1437 default:
1438 dev_err(xsdfec->dev, "Invalid DOUT Width in DT");
1439 return -EINVAL;
1440 }
1441
1442
1443 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code - 1);
1444
1445 xsdfec_cfg_axi_streams(xsdfec);
1446
1447 return 0;
1448}
1449
1450static void
1451xsdfec_log_ecc_errors(struct xsdfec_dev *xsdfec, u32 ecc_err)
1452{
1453 u32 cecc, uecc;
1454 int uecc_cnt;
1455
1456 cecc = ecc_err & XSDFEC_ECC_ISR_SBE;
1457 uecc = ecc_err & XSDFEC_ECC_ISR_MBE;
1458
1459 uecc_cnt = atomic_add_return(hweight32(uecc), &xsdfec->uecc_count);
1460 atomic_add(hweight32(cecc), &xsdfec->cecc_count);
1461
1462 if (uecc_cnt > 0 && uecc_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1463 dev_err(xsdfec->dev,
1464 "Multi-bit error on xsdfec%d. Needs reset",
1465 xsdfec->config.fec_id);
1466 }
1467
1468
1469 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1470}
1471
1472static void
1473xsdfec_log_isr_errors(struct xsdfec_dev *xsdfec, u32 isr_err)
1474{
1475 int isr_err_cnt;
1476
1477
1478 isr_err_cnt = atomic_add_return(hweight32(isr_err),
1479 &xsdfec->isr_err_count);
1480 if (isr_err_cnt > 0 && isr_err_cnt < XSDFEC_ERROR_MAX_THRESHOLD) {
1481 dev_err(xsdfec->dev,
1482 "Tlast,or DIN_WORDS or DOUT_WORDS not correct");
1483 }
1484
1485
1486 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, 0);
1487}
1488
/*
 * Mark the device as failed; xsdfec_dev_ioctl() then rejects all
 * commands except reset/status/stats until a reset is performed.
 */
static void
xsdfec_reset_required(struct xsdfec_dev *xsdfec)
{
	xsdfec->state = XSDFEC_NEEDS_RESET;
}
1494
/*
 * Threaded IRQ handler: masks both interrupt sources, reads the core
 * and ECC status registers, logs the errors, flags fatal conditions
 * (which force a reset and wake poll() waiters), then re-enables the
 * interrupt sources.
 */
static irqreturn_t
xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	bool fatal_err = false;

	WARN_ON(xsdfec->irq != irq);

	/* Mask both interrupt sources while handling this event. */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);

	/* Snapshot both raw status registers. */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);

	if (ecc_err & XSDFEC_ECC_ISR_MBE) {
		/* Multi-bit (uncorrectable) ECC error: fatal. */
		xsdfec_log_ecc_errors(xsdfec, ecc_err);
		xsdfec_reset_required(xsdfec);
		fatal_err = true;
	} else if (isr_err & XSDFEC_ISR_MASK) {
		/*
		 * Interface error (tlast / DIN_WORDS / DOUT_WORDS):
		 * also treated as fatal, device needs a reset.
		 */
		xsdfec_log_isr_errors(xsdfec, isr_err);
		xsdfec_reset_required(xsdfec);
		fatal_err = true;
	} else if (ecc_err & XSDFEC_ECC_ISR_SBE) {
		/* Single-bit ECC error: correctable, only logged. */
		xsdfec_log_ecc_errors(xsdfec, ecc_err);
	} else {
		ret = IRQ_NONE;
	}

	/* Wake any poll() waiters so they can observe NEEDS_RESET. */
	if (fatal_err)
		wake_up_interruptible(&xsdfec->waitq);

	/* Re-enable both interrupt sources. */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}
1543
/*
 * Platform driver probe: map the register resource, parse the DT
 * configuration, optionally hook the error interrupt, then expose the
 * device as a character device under the xsdfec class.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	struct device *dev_create;
	struct resource *res;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	/*
	 * NOTE(review): fec_id is taken from a global counter that is
	 * only incremented at the end of probe -- confirm probes cannot
	 * run concurrently, otherwise two devices could get the same id.
	 */
	xsdfec->config.fec_id = atomic_read(&xsdfec_ndevs);

	dev = xsdfec->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xsdfec->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(xsdfec->regs)) {
		dev_err(dev, "Unable to map resource");
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	/* The IRQ is optional; continue without it if absent. */
	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	/* Save driver private data. */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register the threaded error-interrupt handler. */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread,
						IRQF_ONESHOT,
						"xilinx-sdfec16",
						xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
	xsdfec->xsdfec_cdev.owner = THIS_MODULE;
	err = cdev_add(&xsdfec->xsdfec_cdev,
		       MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id), 1);
	if (err < 0) {
		dev_err(dev, "cdev_add failed");
		err = -EIO;
		goto err_xsdfec_dev;
	}

	if (!xsdfec_class) {
		err = -EIO;
		dev_err(dev, "xsdfec class not created correctly");
		goto err_xsdfec_cdev;
	}

	/* Create /dev/xsdfecN via the module-level class. */
	dev_create = device_create(xsdfec_class, dev,
				   MKDEV(MAJOR(xsdfec_devt),
					 xsdfec->config.fec_id),
				   xsdfec, "xsdfec%d", xsdfec->config.fec_id);
	if (IS_ERR(dev_create)) {
		dev_err(dev, "unable to create device");
		err = PTR_ERR(dev_create);
		goto err_xsdfec_cdev;
	}

	atomic_set(&xsdfec->open_count, 1);
	dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->config.fec_id);
	atomic_inc(&xsdfec_ndevs);
	return 0;

	/* Failure cleanup: unwind in reverse order of acquisition. */
err_xsdfec_cdev:
	cdev_del(&xsdfec->xsdfec_cdev);
err_xsdfec_dev:
	return err;
}
1634
1635static int
1636xsdfec_remove(struct platform_device *pdev)
1637{
1638 struct xsdfec_dev *xsdfec;
1639 struct device *dev = &pdev->dev;
1640
1641 xsdfec = platform_get_drvdata(pdev);
1642 if (!xsdfec)
1643 return -ENODEV;
1644 dev = xsdfec->dev;
1645 if (!xsdfec_class) {
1646 dev_err(dev, "xsdfec_class is NULL");
1647 return -EIO;
1648 }
1649
1650 device_destroy(xsdfec_class,
1651 MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id));
1652 cdev_del(&xsdfec->xsdfec_cdev);
1653 atomic_dec(&xsdfec_ndevs);
1654 return 0;
1655}
1656
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id xsdfec_of_match[] = {
	{ .compatible = "xlnx,sd-fec-1.1", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
1662
/* Platform driver binding probe/remove to the DT match table. */
static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};
1671
1672static int __init xsdfec_init_mod(void)
1673{
1674 int err;
1675
1676 xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
1677 if (IS_ERR(xsdfec_class)) {
1678 err = PTR_ERR(xsdfec_class);
1679 pr_err("%s : Unable to register xsdfec class", __func__);
1680 return err;
1681 }
1682
1683 err = alloc_chrdev_region(&xsdfec_devt,
1684 0, DRIVER_MAX_DEV, DRIVER_NAME);
1685 if (err < 0) {
1686 pr_err("%s : Unable to get major number", __func__);
1687 goto err_xsdfec_class;
1688 }
1689
1690 err = platform_driver_register(&xsdfec_driver);
1691 if (err < 0) {
1692 pr_err("%s Unabled to register %s driver",
1693 __func__, DRIVER_NAME);
1694 goto err_xsdfec_drv;
1695 }
1696 return 0;
1697
1698
1699err_xsdfec_drv:
1700 unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
1701err_xsdfec_class:
1702 class_destroy(xsdfec_class);
1703 return err;
1704}
1705
/* Module exit: tear down in reverse order of xsdfec_init_mod(). */
static void __exit xsdfec_cleanup_mod(void)
{
	platform_driver_unregister(&xsdfec_driver);
	unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
	class_destroy(xsdfec_class);
	xsdfec_class = NULL;
}
1713
module_init(xsdfec_init_mod);
module_exit(xsdfec_cleanup_mod);

/* Module metadata. */
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
1721