1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/clk.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/netdevice.h>
27#include <linux/of.h>
28#include <linux/platform_device.h>
29#include <linux/skbuff.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/can/led.h>
35#include <linux/pm_runtime.h>
36#include <linux/of_device.h>
37
38#define DRIVER_NAME "xilinx_can"
39
40
/* CAN register offsets for the Xilinx Zynq CAN, AXI CAN and AXI CAN FD cores.
 * Offsets below XCAN_F_BRPR_OFFSET exist on all cores; the XCAN_F_* and
 * XCANFD_* registers exist only on the CAN FD core (CANFD_SUPPORT quirk).
 */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset register */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select register */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing register */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter register */
	XCAN_ESR_OFFSET		= 0x14, /* Error status register */
	XCAN_SR_OFFSET		= 0x18, /* Status register */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status register */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable register */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear register */
	XCAN_TXFIFO_ID_OFFSET	= 0x30, /* TX FIFO ID */
	XCAN_TXFIFO_DLC_OFFSET	= 0x34, /* TX FIFO DLC */
	XCAN_TXFIFO_DW1_OFFSET	= 0x38, /* TX FIFO data word 1 */
	XCAN_TXFIFO_DW2_OFFSET	= 0x3C, /* TX FIFO data word 2 */
	XCAN_RXFIFO_ID_OFFSET	= 0x50, /* RX FIFO ID */
	XCAN_RXFIFO_DLC_OFFSET	= 0x54, /* RX FIFO DLC */
	XCAN_RXFIFO_DW1_OFFSET	= 0x58, /* RX FIFO data word 1 */
	XCAN_RXFIFO_DW2_OFFSET	= 0x5C, /* RX FIFO data word 2 */

	/* CAN FD-only registers below */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data-phase baud rate prescaler */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data-phase bit timing */
	XCAN_TRR_OFFSET		= 0x090, /* TX buffer ready request */
	XCAN_IETRS_OFFSET	= 0x094, /* TX ready request served irq enable */

	/* TX mailbox 0 base; per-buffer stride is XCANFD_MAX_FRAME_LEN */
	XCANFD_TXFIFO_ID_OFFSET	= 0x0100,
	XCANFD_TXFIFO_DLC_OFFSET = 0x0104,
	XCANFD_TXFIFO_DW_OFFSET	= 0x0108,

	/* RX mailbox 0 base; per-buffer stride is XCANFD_MAX_FRAME_LEN */
	XCANFD_RXFIFO_ID_OFFSET	= 0x1100,
	XCANFD_RXFIFO_DLC_OFFSET = 0x1104,
	XCANFD_RXFIFO_DW_OFFSET	= 0x1108,

	XCAN_AFMR_BASE_OFFSET	= 0x1A00, /* Acceptance filter mask base */
	XCAN_AFIDR_BASE_OFFSET	= 0x1A04, /* Acceptance filter ID base */
	XCAN_AFR_OFFSET		= 0x0E0, /* Acceptance filter register */
	XCAN_FSR_OFFSET		= 0x0E8, /* RX FIFO status register */
	XCAN_TIMESTAMPR_OFFSET	= 0x0028, /* Timestamp register */
};
92
93
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft reset */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loopback mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Sync jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCANFD_BTR_SJW_MASK		0x000F0000 /* SJW (CANFD BTR layout) */
#define XCANFD_BTR_TS2_MASK		0x00000F00 /* TS2 (CANFD BTR layout) */
#define XCANFD_BTR_TS1_MASK		0x0000003F /* TS1 (CANFD BTR layout) */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loopback mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO empty irq */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up irq */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep irq */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off irq */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error irq */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO not empty irq */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO overflow irq */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received irq */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* TX FIFO full irq */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful irq */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost irq */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TX req */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_MSR_BRSD_MASK		0x00000008 /* Bit rate switch disable */
#define XCAN_MSR_SNOOP_MASK		0x00000004 /* Snoop mode select */
#define XCAN_MSR_DPEE_MASK		0x00000020 /* Protocol exception event */

/* CAN FD mode-select and error masks */
#define XCAN_MSR_SBR_MASK		0x00000040 /* Start bus-off recovery */
#define XCAN_MSR_ABR_MASK		0x00000080 /* Auto bus-off recovery */
#define XCAN_MSR_CONFIG_MASK		0x000000F8 /* Configuration mode */
#define XCAN_F_BRPR_TDCMASK		0x00001F00 /* TDC offset */
#define XCAN_F_BTR_SJW_MASK		0x00070000 /* Data-phase SJW */
#define XCAN_F_BTR_TS2_MASK		0x00000700 /* Data-phase TS2 */
#define XCAN_F_BTR_TS1_MASK		0x0000000F /* Data-phase TS1 */
#define XCAN_ESR_F_BERR_MASK		0x00000800 /* Data-phase bit error */
#define XCAN_ESR_F_STER_MASK		0x00000400 /* Data-phase stuff error */
#define XCAN_ESR_F_FMER_MASK		0x00000200 /* Data-phase form error */
#define XCAN_ESR_F_CRCER_MASK		0x00000100 /* Data-phase CRC error */
#define XCAN_SR_SNOOP_MASK		0x00001000 /* Snoop mode */
#define XCAN_SR_BBSY_MASK		0x00000020 /* Bus busy */
#define XCAN_SR_BIDLE_MASK		0x00000010 /* Bus idle */
#define XCAN_SR_SLEEP_MASK		0x00000004 /* Sleep mode */
#define XCAN_SR_PEE_CONFIG_MASK		0x00000200 /* Protocol exception mode */

/* Bus-off recovery in progress */
#define XCAN_SR_BSFR_CONFIG_MASK	0x00000400

/* Non-ISO CAN FD mode indicator */
#define XCAN_SR_NISO_MASK		0x00000800
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX fill level */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX read index */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX increment read index */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXRRS_MASK		0x00002000 /* TX ready request served */

/* Protocol exception / bus-off recovery done interrupts (CAN FD) */
#define XCAN_IXR_PEE_MASK		0x00000004
#define XCAN_IXR_BSRD_MASK		0x00000008
#define XCAN_AFR_ENABLE_ALL		0xFFFFFFFF /* Enable all filters */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL: CAN FD frame */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS: bit rate switch */
#define XCAN_DLCR_DLC_SHIFT		28
#define XCAN_DLCR_EDL_SHIFT		27
#define XCAN_DLCR_BRS_SHIFT		26

/* Interrupt sources serviced by this driver on every core */
#define XCAN_INTR_ALL		(XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
				 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
				 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | \
				 XCAN_IXR_ARBLST_MASK)

/* CAN register bit shifts - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Sync jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCANFD_BTR_SJW_SHIFT		16 /* SJW (CANFD BTR layout) */
#define XCANFD_BTR_TS2_SHIFT		8  /* TS2 (CANFD BTR layout) */
#define XCAN_SR_ESTAT_SHIFT		7  /* Error status */
#define XCAN_RXLRM_BI_SHIFT		18 /* RX last message buffer index */
#define XCAN_CSB_SHIFT			16 /* Core status bit */
#define XCAN_IDR_SRR_SHIFT		20 /* Soft reset request */
#define XCAN_IDR_IDE_SHIFT		19 /* Identifier extension */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard messg identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended message identifier */
/* NOTE(review): duplicate of the XCAN_DLCR_DLC_SHIFT definition above;
 * harmless (same value) but one of the two could be dropped.
 */
#define XCAN_DLCR_DLC_SHIFT		28
#define XCAN_ESR_REC_SHIFT		8  /* Receive error counter */

/* CAN frame length and timing constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCAN_TIMEOUT			(1 * HZ)
#define XCANFD_MAX_FRAME_LEN		72 /* Per-mailbox stride in bytes */
#define XCANFD_FRAME_MAX_DATA_LEN	64
#define XCANFD_DW_BYTES			4
#define XCANFD_CTRLREG_WIDTH		4

/* Quirk flag: core is a CAN FD core (mailbox TX/RX, extra registers) */
#define CANFD_SUPPORT			BIT(0)

/* Per-mailbox register address helpers; each TX/RX buffer occupies
 * XCANFD_MAX_FRAME_LEN bytes of register space.
 */
#define XCANFD_TXDW_OFFSET(n)		(XCANFD_TXFIFO_DW_OFFSET + (n * \
					XCANFD_MAX_FRAME_LEN))
#define XCANFD_TXID_OFFSET(n)		(XCANFD_TXFIFO_ID_OFFSET + (n * \
					XCANFD_MAX_FRAME_LEN))
#define XCANFD_TXDLC_OFFSET(n)		(XCANFD_TXFIFO_DLC_OFFSET + (n *\
					XCANFD_MAX_FRAME_LEN))
#define XCANFD_RXDLC_OFFSET(readindex)	(XCANFD_RXFIFO_DLC_OFFSET + (readindex \
					* XCANFD_MAX_FRAME_LEN))
#define XCANFD_RXID_OFFSET(readindex)	(XCANFD_RXFIFO_ID_OFFSET + (readindex \
					* XCANFD_MAX_FRAME_LEN))
#define XCANFD_RXDW_OFFSET(readindex)	(XCANFD_RXFIFO_DW_OFFSET + (readindex \
					* XCANFD_MAX_FRAME_LEN))
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/**
 * struct xcan_priv - This definition define CAN driver instance
 * @can:		CAN private data structure (must be first member)
 * @tx_head:		Tx CAN packets ready to send on the queue
 * @tx_tail:		Tx CAN packets successfully sent on the queue
 * @tx_max:		Maximum number of queued Tx CAN packets
 * @napi:		NAPI structure for RX polling
 * @read_reg:		For reading data from CAN registers
 * @write_reg:		For writing data to CAN registers
 * @dev:		Device data structure (used for runtime PM)
 * @reg_base:		Ioremapped address of the CAN register space
 * @irq_flags:		For request_irq()
 * @bus_clk:		Pointer to struct clk (AXI/APB bus clock)
 * @can_clk:		Pointer to struct clk (CAN core clock)
 * @quirks:		Core-specific feature flags (e.g. CANFD_SUPPORT)
 */
struct xcan_priv {
	struct can_priv can;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	u32 quirks;
};
255
/**
 * struct xcan_platform_data - Per-compatible match data
 * @quirks:	Feature flags for the matched core (see CANFD_SUPPORT)
 */
struct xcan_platform_data {
	u32 quirks;
};
259
260
/* CAN arbitration-phase bittiming constants as per Xilinx CAN specs */
static struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
272
273
/* CAN FD data-phase bittiming constants as per Xilinx CAN FD specs */
static struct can_bittiming_const xcan_data_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
285
286
287
288
289
290
291
292
293
/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the paricular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
299
300
301
302
303
304
305
306
307
/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
312
313
314
315
316
317
318
319
320
/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the paricular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
326
327
328
329
330
331
332
333
334
/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
339
340
341
342
343
344
345
346
347
348
/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver enters into
 * configuration mode by issuing a software reset and polling the status
 * register until the CONFIG bit is set (bounded by XCAN_TIMEOUT).
 *
 * Return: 0 on success, -ETIMEDOUT if the core never reaches config mode
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		/* Sleep rather than spin while the core resets */
		usleep_range(500, 10000);
	}

	return 0;
}
367
368
369
370
371
372
373
374
375static int xcan_set_bittiming(struct net_device *ndev)
376{
377 struct xcan_priv *priv = netdev_priv(ndev);
378 struct can_bittiming *bt = &priv->can.bittiming;
379 struct can_bittiming *dbt = &priv->can.data_bittiming;
380 u32 btr0, btr1;
381 u32 is_config_mode;
382
383
384
385
386 is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
387 XCAN_SR_CONFIG_MASK;
388 if (!is_config_mode) {
389 netdev_alert(ndev,
390 "BUG! Cannot set bittiming - CAN is not in config mode\n");
391 return -EPERM;
392 }
393
394
395 btr0 = (bt->brp - 1);
396
397
398 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
399
400
401 btr1 |= (bt->phase_seg2 - 1) << ((priv->quirks & CANFD_SUPPORT) ?
402 XCANFD_BTR_TS2_SHIFT : XCAN_BTR_TS2_SHIFT);
403
404
405 btr1 |= (bt->sjw - 1) << ((priv->quirks & CANFD_SUPPORT) ?
406 XCANFD_BTR_SJW_SHIFT : XCAN_BTR_SJW_SHIFT);
407
408 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
409 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
410
411 netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
412 priv->read_reg(priv, XCAN_BRPR_OFFSET),
413 priv->read_reg(priv, XCAN_BTR_OFFSET));
414
415 if (priv->quirks & CANFD_SUPPORT) {
416
417 btr0 = dbt->brp - 1;
418
419
420 btr1 = dbt->prop_seg + bt->phase_seg1 - 1;
421
422
423 btr1 |= (dbt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
424
425
426 btr1 |= (dbt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
427
428 priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
429 priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
430 }
431 netdev_dbg(ndev, "F_BRPR=0x%08x, F_BTR=0x%08x\n",
432 priv->read_reg(priv, XCAN_F_BRPR_OFFSET),
433 priv->read_reg(priv, XCAN_F_BTR_OFFSET));
434
435 return 0;
436}
437
438
439
440
441
442
443
444
445
446
447
448static int xcan_chip_start(struct net_device *ndev)
449{
450 struct xcan_priv *priv = netdev_priv(ndev);
451 u32 reg_msr, reg_sr_mask, intr_all = 0;
452 int err;
453 unsigned long timeout;
454
455
456 err = set_reset_mode(ndev);
457 if (err < 0)
458 return err;
459
460 err = xcan_set_bittiming(ndev);
461 if (err < 0)
462 return err;
463
464
465 if (priv->quirks & CANFD_SUPPORT) {
466 intr_all = XCAN_INTR_ALL | XCAN_IXR_PEE_MASK |
467 XCAN_IXR_BSRD_MASK | XCAN_IXR_RXMNF_MASK |
468 XCAN_IXR_TXRRS_MASK | XCAN_IXR_RXOK_MASK;
469 } else {
470 intr_all = XCAN_INTR_ALL | XCAN_IXR_RXNEMP_MASK;
471 }
472
473 priv->write_reg(priv, XCAN_IER_OFFSET, intr_all);
474
475
476 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
477 reg_msr = XCAN_MSR_LBACK_MASK;
478 reg_sr_mask = XCAN_SR_LBACK_MASK;
479 } else {
480 reg_msr = 0x0;
481 reg_sr_mask = XCAN_SR_NORMAL_MASK;
482 }
483
484 if (priv->quirks & CANFD_SUPPORT) {
485
486
487
488 priv->write_reg(priv, XCAN_AFR_OFFSET, XCAN_AFR_ENABLE_ALL);
489 }
490 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
491 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
492
493 timeout = jiffies + XCAN_TIMEOUT;
494 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
495 if (time_after(jiffies, timeout)) {
496 netdev_warn(ndev,
497 "timed out for correct mode\n");
498 return -ETIMEDOUT;
499 }
500 }
501 netdev_dbg(ndev, "status:#x%08x\n",
502 priv->read_reg(priv, XCAN_SR_OFFSET));
503
504 priv->can.state = CAN_STATE_ERROR_ACTIVE;
505 return 0;
506}
507
508
509
510
511
512
513
514
515
516
517
518static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
519{
520 int ret;
521
522 switch (mode) {
523 case CAN_MODE_START:
524 ret = xcan_chip_start(ndev);
525 if (ret < 0) {
526 netdev_err(ndev, "xcan_chip_start failed!\n");
527 return ret;
528 }
529 netif_wake_queue(ndev);
530 break;
531 default:
532 ret = -EOPNOTSUPP;
533 break;
534 }
535
536 return ret;
537}
538
539
540
541
542
543
544
545
546
547
548int xcan_get_freebuffer(struct xcan_priv *priv)
549{
550 u32 bufindex = 0, trrregval = 0;
551
552 trrregval = priv->read_reg(priv, XCAN_TRR_OFFSET);
553 for (bufindex = 0; bufindex < priv->tx_max; bufindex++) {
554 if (trrregval & (1 << bufindex))
555 continue;
556 return bufindex;
557 }
558 return -1;
559}
560
561
562
563
564
565
566
567
568
569
570
571
572static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
573{
574 struct xcan_priv *priv = netdev_priv(ndev);
575 struct net_device_stats *stats = &ndev->stats;
576 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
577 u32 id, dlc, data[2] = {0, 0};
578 u32 buffnr, ramoff, dwindex = 0, i, trrval;
579
580 if (can_dropped_invalid_skb(ndev, skb))
581 return NETDEV_TX_OK;
582
583 if (!(priv->quirks & CANFD_SUPPORT)) {
584
585 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
586 XCAN_SR_TXFLL_MASK)) {
587 netif_stop_queue(ndev);
588 netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
589 return NETDEV_TX_BUSY;
590 }
591 }
592
593
594 if (cf->can_id & CAN_EFF_FLAG) {
595
596 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
597 XCAN_IDR_ID2_MASK;
598 id |= (((cf->can_id & CAN_EFF_MASK) >>
599 (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
600 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
601
602
603
604
605 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
606
607 if (cf->can_id & CAN_RTR_FLAG)
608
609 id |= XCAN_IDR_RTR_MASK;
610 } else {
611
612 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
613 XCAN_IDR_ID1_MASK;
614
615 if (cf->can_id & CAN_RTR_FLAG)
616
617 id |= XCAN_IDR_SRR_MASK;
618 }
619
620 dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
621 if (priv->quirks & CANFD_SUPPORT) {
622 if (can_is_canfd_skb(skb)) {
623 if (cf->flags & CANFD_BRS)
624 dlc |= XCAN_DLCR_BRS_MASK;
625 dlc |= XCAN_DLCR_EDL_MASK;
626 }
627
628 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
629 priv->tx_head++;
630 buffnr = xcan_get_freebuffer(priv);
631 if (buffnr == -1)
632 netif_stop_queue(ndev);
633
634 priv->write_reg(priv, XCANFD_TXID_OFFSET(buffnr), id);
635 priv->write_reg(priv, XCANFD_TXDLC_OFFSET(buffnr), dlc);
636
637 for (i = 0; i < cf->len; i += 4) {
638 ramoff = XCANFD_TXDW_OFFSET(buffnr) + (dwindex *
639 XCANFD_DW_BYTES);
640 priv->write_reg(priv, ramoff,
641 be32_to_cpup((__be32 *)(cf->data + i)));
642 dwindex++;
643 }
644
645 trrval = priv->read_reg(priv, XCAN_TRR_OFFSET);
646 trrval |= 1 << buffnr;
647 priv->write_reg(priv, XCAN_TRR_OFFSET, trrval);
648 stats->tx_bytes += cf->len;
649 if (buffnr == -1)
650 netif_stop_queue(ndev);
651 } else {
652 if (cf->len > 0)
653 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
654 if (cf->len > 4)
655 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
656
657 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
658 priv->tx_head++;
659
660
661 priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
662
663
664
665 priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
666 if (!(cf->can_id & CAN_RTR_FLAG)) {
667 priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
668
669
670
671 priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
672 stats->tx_bytes += cf->len;
673 }
674 }
675
676 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
677 netif_stop_queue(ndev);
678
679 return NETDEV_TX_OK;
680}
681
682
683
684
685
686
687
688
689
690
691
/**
 * xcan_rx - Is called from CAN isr to complete the received
 *		frame  processing
 * @ndev:	Pointer to net_device structure
 *
 * Reads one frame from the RX FIFO of a legacy (non-FD) core, translates
 * the hardware ID/DLC layout into a struct can_frame and hands it to the
 * network stack. The ID read pops the FIFO entry, so the register reads
 * must stay in this order.
 *
 * Return: 1 when a frame was delivered, 0 on skb allocation failure
 */
static int xcan_rx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
	dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
				XCAN_DLCR_DLC_SHIFT;
	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 reads must follow the ID read to stay in FIFO order */
	data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
	data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
748
749
750
751
752
753
754
755
756
757
758
/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *		frame  processing
 * @ndev:	Pointer to net_device structure
 *
 * Reads one frame from the RX mailbox indicated by the FSR read index on
 * a CAN FD core. Allocates a classic or FD skb depending on the EDL bit,
 * decodes ID/DLC/payload, then acknowledges the entry by setting the IRI
 * bit so the hardware advances the read index.
 *
 * Return: 1 when a frame was delivered, 0 when the FIFO is empty or skb
 * allocation failed
 */
static int xcanfd_rx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, fsr, readindex;

	fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
	if (fsr & XCAN_FSR_FL_MASK) {
		readindex = fsr & XCAN_FSR_RI_MASK;
		id_xcan = priv->read_reg(priv, XCANFD_RXID_OFFSET(readindex));
		dlc = priv->read_reg(priv, XCANFD_RXDLC_OFFSET(readindex));
		/* EDL set means this is a CAN FD frame */
		if (dlc & XCAN_DLCR_EDL_MASK)
			skb = alloc_canfd_skb(ndev, &cf);
		else
			skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

		if (unlikely(!skb)) {
			stats->rx_dropped++;
			return 0;
		}

		/* Change Xilinx CANFD data length format to socketCAN data
		 * format
		 */
		if (dlc & XCAN_DLCR_EDL_MASK)
			cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					  XCAN_DLCR_DLC_SHIFT);
		else
			cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
						  XCAN_DLCR_DLC_SHIFT);

		/* Change Xilinx CAN ID format to socketCAN ID format */
		if (id_xcan & XCAN_IDR_IDE_MASK) {
			/* The received frame is an Extended format frame */
			cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
			cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
					XCAN_IDR_ID2_SHIFT;
			cf->can_id |= CAN_EFF_FLAG;
			if (id_xcan & XCAN_IDR_RTR_MASK)
				cf->can_id |= CAN_RTR_FLAG;
		} else {
			/* The received frame is a standard format frame */
			cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
					XCAN_IDR_ID1_SHIFT;
			/* CAN FD frames have no RTR; SRR only means remote
			 * request on classic frames
			 */
			if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						XCAN_IDR_SRR_MASK))
				cf->can_id |= CAN_RTR_FLAG;
		}

		/* Check the frame received is FD or not*/
		if (dlc & XCAN_DLCR_EDL_MASK) {
			for (i = 0; i < cf->len; i += 4) {
				data[0] = priv->read_reg(priv,
						(XCANFD_RXDW_OFFSET(readindex) +
						(dwindex * XCANFD_DW_BYTES)));
				*(__be32 *)(cf->data + i) = cpu_to_be32(
						data[0]);
				dwindex++;
			}
		} else {
			for (i = 0; i < cf->len; i += 4) {
				data[0] = priv->read_reg(priv,
					XCANFD_RXDW_OFFSET(readindex) + i);
				*(__be32 *)(cf->data + i) = cpu_to_be32(
						data[0]);
			}
		}
		/* Update the read index of the RX FIFO by setting the IRI
		 * bit; the trailing read flushes the posted write before we
		 * count the frame
		 */
		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
		fsr |= XCAN_FSR_IRI_MASK;
		priv->write_reg(priv, XCAN_FSR_OFFSET, fsr);
		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
		stats->rx_bytes += cf->len;
		stats->rx_packets++;
		netif_receive_skb(skb);

		return 1;
	}
	/* If FSR Register is not updated with fill level */
	return 0;
}
844
845static void xcan_chip_stop(struct net_device *ndev);
846
847
848
849
850
851
852
853
854
855static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
856{
857 struct xcan_priv *priv = netdev_priv(ndev);
858 struct net_device_stats *stats = &ndev->stats;
859 struct can_frame *cf;
860 struct sk_buff *skb;
861 u32 err_status, status, txerr = 0, rxerr = 0;
862
863 skb = alloc_can_err_skb(ndev, &cf);
864
865 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
866 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
867 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
868 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
869 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
870 status = priv->read_reg(priv, XCAN_SR_OFFSET);
871
872 if (isr & XCAN_IXR_BSOFF_MASK) {
873 priv->can.state = CAN_STATE_BUS_OFF;
874 priv->can.can_stats.bus_off++;
875
876 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
877 can_bus_off(ndev);
878 if (skb)
879 cf->can_id |= CAN_ERR_BUSOFF;
880 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
881 priv->can.state = CAN_STATE_ERROR_PASSIVE;
882 priv->can.can_stats.error_passive++;
883 if (skb) {
884 cf->can_id |= CAN_ERR_CRTL;
885 cf->data[1] = (rxerr > 127) ?
886 CAN_ERR_CRTL_RX_PASSIVE :
887 CAN_ERR_CRTL_TX_PASSIVE;
888 cf->data[6] = txerr;
889 cf->data[7] = rxerr;
890 }
891 } else if (status & XCAN_SR_ERRWRN_MASK) {
892 priv->can.state = CAN_STATE_ERROR_WARNING;
893 priv->can.can_stats.error_warning++;
894 if (skb) {
895 cf->can_id |= CAN_ERR_CRTL;
896 cf->data[1] |= (txerr > rxerr) ?
897 CAN_ERR_CRTL_TX_WARNING :
898 CAN_ERR_CRTL_RX_WARNING;
899 cf->data[6] = txerr;
900 cf->data[7] = rxerr;
901 }
902 }
903
904
905 if (isr & XCAN_IXR_ARBLST_MASK) {
906 priv->can.can_stats.arbitration_lost++;
907 if (skb) {
908 cf->can_id |= CAN_ERR_LOSTARB;
909 cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
910 }
911 }
912
913
914 if (isr & XCAN_IXR_RXOFLW_MASK) {
915 stats->rx_over_errors++;
916 stats->rx_errors++;
917 xcan_chip_stop(ndev);
918 xcan_chip_start(ndev);
919 if (skb) {
920 cf->can_id |= CAN_ERR_CRTL;
921 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
922 }
923 }
924
925
926 if (isr & XCAN_IXR_ERROR_MASK) {
927 if (skb)
928 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
929
930
931 if (err_status & XCAN_ESR_ACKER_MASK) {
932 stats->tx_errors++;
933 if (skb) {
934 cf->can_id |= CAN_ERR_ACK;
935 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
936 }
937 }
938
939
940 if (err_status & XCAN_ESR_BERR_MASK) {
941 stats->tx_errors++;
942 if (skb) {
943 cf->can_id |= CAN_ERR_PROT;
944 cf->data[2] = CAN_ERR_PROT_BIT;
945 }
946 }
947
948
949 if (err_status & XCAN_ESR_STER_MASK) {
950 stats->rx_errors++;
951 if (skb) {
952 cf->can_id |= CAN_ERR_PROT;
953 cf->data[2] = CAN_ERR_PROT_STUFF;
954 }
955 }
956
957
958 if (err_status & XCAN_ESR_FMER_MASK) {
959 stats->rx_errors++;
960 if (skb) {
961 cf->can_id |= CAN_ERR_PROT;
962 cf->data[2] = CAN_ERR_PROT_FORM;
963 }
964 }
965
966
967 if (err_status & XCAN_ESR_CRCER_MASK) {
968 stats->rx_errors++;
969 if (skb) {
970 cf->can_id |= CAN_ERR_PROT;
971 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
972 }
973 }
974 if (priv->quirks & CANFD_SUPPORT) {
975
976 if (err_status & XCAN_ESR_F_BERR_MASK) {
977 stats->tx_errors++;
978 if (skb) {
979 cf->can_id |= CAN_ERR_PROT;
980 cf->data[2] = CAN_ERR_PROT_BIT;
981 }
982 }
983
984 if (err_status & XCAN_ESR_F_STER_MASK) {
985 stats->rx_errors++;
986 if (skb) {
987 cf->can_id |= CAN_ERR_PROT;
988 cf->data[2] = CAN_ERR_PROT_STUFF;
989 }
990 }
991
992 if (err_status & XCAN_ESR_F_FMER_MASK) {
993 stats->rx_errors++;
994 if (skb) {
995 cf->can_id |= CAN_ERR_PROT;
996 cf->data[2] = CAN_ERR_PROT_FORM;
997 }
998 }
999 if (err_status & XCAN_ESR_F_CRCER_MASK) {
1000 stats->rx_errors++;
1001 if (skb) {
1002 cf->can_id |= CAN_ERR_PROT;
1003 priv->can.can_stats.bus_error++;
1004 }
1005 }
1006 }
1007 priv->can.can_stats.bus_error++;
1008 }
1009
1010 if (skb) {
1011 stats->rx_packets++;
1012 stats->rx_bytes += cf->can_dlc;
1013 netif_rx(skb);
1014 }
1015
1016 netdev_dbg(ndev, "%s: error status register:0x%x\n",
1017 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1018}
1019
1020
1021
1022
1023
1024
1025
1026
1027
/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * Updates the CAN state on sleep and wake-up interrupts.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed
 *
 * This is the poll routine for rx part. It will process the packets
 * maximux quota value: drains frames while the per-core RX interrupt bit
 * stays set, acknowledging it after each frame. When fewer than @quota
 * frames were handled it completes NAPI and re-enables the RX interrupt.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	int work_done = 0, rx_bit_mask;

	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	/* CAN FD cores signal RX with RXOK, legacy cores with RXNEMP */
	rx_bit_mask = ((priv->quirks & CANFD_SUPPORT) ?
			XCAN_IXR_RXOK_MASK : XCAN_IXR_RXNEMP_MASK);
	while ((isr & rx_bit_mask) && (work_done < quota)) {
		if (rx_bit_mask & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev);
		else
			work_done += xcan_rx(ndev);
		/* Acknowledge the handled frame, then re-sample the ISR */
		priv->write_reg(priv, XCAN_ICR_OFFSET, rx_bit_mask);
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}

	if (work_done)
		can_led_event(ndev, CAN_LED_EVENT_RX);

	if (work_done < quota) {
		napi_complete(napi);
		/* Re-enable the RX interrupt disabled by the hard irq */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= rx_bit_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
1081
1082
1083
1084
1085
1086
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 *
 * Completes echoed TX skbs while TXOK is set and outstanding frames
 * remain, then wakes the transmit queue.
 *
 * NOTE(review): TXOK is cleared before the ISR is re-read, so multiple
 * hardware completions between reads collapse into one loop iteration
 * paired with one queued frame - the loop bound on tx_head/tx_tail keeps
 * the echo accounting consistent. Confirm against the core's TRM before
 * restructuring.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;

	while ((priv->tx_head - priv->tx_tail > 0) &&
			(isr & XCAN_IXR_TXOK_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		can_get_echo_skb(ndev, priv->tx_tail %
					priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	}
	can_led_event(ndev, CAN_LED_EVENT_TX);
	netif_wake_queue(ndev);
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id poniter
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR: state changes, TX done, errors, and
 * RX. RX is deferred to NAPI by masking the RX interrupt and scheduling
 * the poll routine.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier, rx_bit_mask;

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
				XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
				XCAN_IXR_ARBLST_MASK));
		xcan_err_interrupt(ndev, isr);
	}
	/* CAN FD cores have additional error interrupt sources */
	if (priv->quirks & CANFD_SUPPORT) {
		if (isr & (XCAN_IXR_RXMNF_MASK | XCAN_IXR_TXRRS_MASK |
				XCAN_IXR_PEE_MASK | XCAN_IXR_BSRD_MASK)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					(XCAN_IXR_RXMNF_MASK |
					 XCAN_IXR_TXRRS_MASK |
					 XCAN_IXR_PEE_MASK |
					 XCAN_IXR_BSRD_MASK));
			xcan_err_interrupt(ndev, isr);
		}
	}

	/* Check for the type of receive interrupt and Processing it */
	rx_bit_mask = ((priv->quirks & CANFD_SUPPORT) ?
			XCAN_IXR_RXOK_MASK : XCAN_IXR_RXNEMP_MASK);
	if (isr & rx_bit_mask) {
		/* Mask RX until the NAPI poll has drained the FIFO */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~(rx_bit_mask);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
1168
1169
1170
1171
1172
1173
1174
1175
1176static void xcan_chip_stop(struct net_device *ndev)
1177{
1178 struct xcan_priv *priv = netdev_priv(ndev);
1179 u32 ier, intr_all = 0;
1180
1181
1182 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1183 if (priv->quirks & CANFD_SUPPORT) {
1184 intr_all = XCAN_INTR_ALL | XCAN_IXR_PEE_MASK |
1185 XCAN_IXR_BSRD_MASK | XCAN_IXR_RXMNF_MASK |
1186 XCAN_IXR_TXRRS_MASK | XCAN_IXR_RXOK_MASK;
1187 } else {
1188 intr_all = XCAN_INTR_ALL | XCAN_IXR_RXNEMP_MASK;
1189 }
1190
1191 ier &= ~intr_all;
1192 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1193 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1194 priv->can.state = CAN_STATE_STOPPED;
1195}
1196
1197
1198
1199
1200
1201
1202
1203
/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine: resumes the device via runtime PM,
 * requests the interrupt line, resets and starts the controller, then
 * enables NAPI and the transmit queue. Resources acquired earlier are
 * released in reverse order on failure (goto cleanup chain).
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}
1256
1257
1258
1259
1260
1261
1262
/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Stops the queue and NAPI, halts the controller, releases the irq and
 * the candev, then drops the runtime PM reference taken in xcan_open().
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}
1278
1279
1280
1281
1282
1283
1284
1285
1286
/**
 * xcan_get_berr_counter - error counter routine
 * @ndev:	Pointer to net_device structure
 * @bec:	Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine. Resumes the device via
 * runtime PM, reads the TX/RX error counters from the ECR register and
 * suspends it again.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}
1308
1309
/* Network device operations for the Xilinx CAN driver */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
1316
1317
1318
1319
1320
1321
1322
1323
1324static int __maybe_unused xcan_suspend(struct device *dev)
1325{
1326 if (!device_may_wakeup(dev))
1327 return pm_runtime_force_suspend(dev);
1328
1329 return 0;
1330}
1331
1332
1333
1334
1335
1336
1337
1338
1339static int __maybe_unused xcan_resume(struct device *dev)
1340{
1341 if (!device_may_wakeup(dev))
1342 return pm_runtime_force_resume(dev);
1343
1344 return 0;
1345
1346}
1347
1348
1349
1350
1351
1352
1353
1354
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Detaches the interface, puts the core into sleep mode and gates both
 * clocks.
 *
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
	}

	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
1373
1374
1375
1376
1377
1378
1379
1380
/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev:	Address of the device structure
 *
 * Ungates the clocks, resets the core and, if the interface was running,
 * derives the CAN state from the latched interrupt/status registers
 * before re-attaching the interface.
 *
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;
	u32 isr, status;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if (netif_running(ndev)) {
		/* Recover the pre-suspend CAN state from the hardware */
		if (isr & XCAN_IXR_BSOFF_MASK) {
			priv->can.state = CAN_STATE_BUS_OFF;
			priv->write_reg(priv, XCAN_SRR_OFFSET,
					XCAN_SRR_RESET_MASK);
		} else if ((status & XCAN_SR_ESTAT_MASK) ==
					XCAN_SR_ESTAT_MASK) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
		} else if (status & XCAN_SR_ERRWRN_MASK) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
		} else {
			priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
1423
/* PM callbacks: system sleep delegates to runtime PM (force suspend/resume)
 * unless the device is wakeup-enabled; runtime PM gates the clocks.
 */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
1428
/* Platform data for CAN FD capable IP ("xlnx,canfd-1.0"): enables the
 * CANFD_SUPPORT quirk checked in xcan_probe().
 */
static const struct xcan_platform_data xcan_def = {
	.quirks = CANFD_SUPPORT,
};
1432
1433
/* Match table for OF platform binding; only the CAN FD IP carries
 * platform data (the CANFD_SUPPORT quirk).
 */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", },
	{ .compatible = "xlnx,axi-can-1.00.a", },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_def },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451static int xcan_probe(struct platform_device *pdev)
1452{
1453 struct resource *res;
1454 struct net_device *ndev;
1455 struct xcan_priv *priv;
1456 const struct of_device_id *match;
1457 void __iomem *addr;
1458 int ret, rx_max, tx_max;
1459
1460
1461 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1462 addr = devm_ioremap_resource(&pdev->dev, res);
1463 if (IS_ERR(addr)) {
1464 ret = PTR_ERR(addr);
1465 goto err;
1466 }
1467
1468 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
1469 if (ret < 0)
1470 goto err;
1471
1472 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
1473 if (ret < 0)
1474 goto err;
1475
1476
1477 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1478 if (!ndev)
1479 return -ENOMEM;
1480
1481 priv = netdev_priv(ndev);
1482
1483 match = of_match_node(xcan_of_match, pdev->dev.of_node);
1484 if (match && match->data) {
1485 const struct xcan_platform_data *data = match->data;
1486
1487 priv->quirks = data->quirks;
1488 }
1489
1490 priv->dev = &pdev->dev;
1491 priv->can.bittiming_const = &xcan_bittiming_const;
1492 priv->can.do_set_mode = xcan_do_set_mode;
1493 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1494 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1495 CAN_CTRLMODE_BERR_REPORTING;
1496 if (priv->quirks & CANFD_SUPPORT) {
1497 priv->can.data_bittiming_const = &xcan_data_bittiming_const;
1498 priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1499 xcan_bittiming_const.tseg1_max = 64;
1500 xcan_bittiming_const.tseg2_max = 16;
1501 xcan_bittiming_const.sjw_max = 16;
1502 }
1503 priv->reg_base = addr;
1504 priv->tx_max = tx_max;
1505
1506
1507 ndev->irq = platform_get_irq(pdev, 0);
1508 ndev->flags |= IFF_ECHO;
1509
1510 platform_set_drvdata(pdev, ndev);
1511 SET_NETDEV_DEV(ndev, &pdev->dev);
1512 ndev->netdev_ops = &xcan_netdev_ops;
1513
1514
1515 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1516 if (IS_ERR(priv->can_clk)) {
1517 dev_err(&pdev->dev, "Device clock not found.\n");
1518 ret = PTR_ERR(priv->can_clk);
1519 goto err_free;
1520 }
1521
1522 if (of_device_is_compatible(pdev->dev.of_node,
1523 "xlnx,zynq-can-1.0")) {
1524 priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
1525 if (IS_ERR(priv->bus_clk)) {
1526 dev_err(&pdev->dev, "bus clock not found\n");
1527 ret = PTR_ERR(priv->bus_clk);
1528 goto err_free;
1529 }
1530 } else {
1531 priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1532 if (IS_ERR(priv->bus_clk)) {
1533 dev_err(&pdev->dev, "bus clock not found\n");
1534 ret = PTR_ERR(priv->bus_clk);
1535 goto err_free;
1536 }
1537 }
1538
1539 priv->write_reg = xcan_write_reg_le;
1540 priv->read_reg = xcan_read_reg_le;
1541
1542 pm_runtime_enable(&pdev->dev);
1543 ret = pm_runtime_get_sync(&pdev->dev);
1544 if (ret < 0) {
1545 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1546 __func__, ret);
1547 goto err_pmdisable;
1548 }
1549
1550 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1551 priv->write_reg = xcan_write_reg_be;
1552 priv->read_reg = xcan_read_reg_be;
1553 }
1554
1555 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1556
1557 netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1558
1559 ret = register_candev(ndev);
1560 if (ret) {
1561 dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
1562 goto err_disableclks;
1563 }
1564
1565 devm_can_led_init(ndev);
1566
1567 pm_runtime_put(&pdev->dev);
1568
1569 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
1570 priv->reg_base, ndev->irq, priv->can.clock.freq,
1571 priv->tx_max);
1572
1573 return 0;
1574
1575err_disableclks:
1576 pm_runtime_put(priv->dev);
1577err_pmdisable:
1578 pm_runtime_disable(&pdev->dev);
1579err_free:
1580 free_candev(ndev);
1581err:
1582 return ret;
1583}
1584
1585
1586
1587
1588
1589
1590
1591
/**
 * xcan_remove - Unbind the driver from the device
 * @pdev:	Handle to the platform device structure
 *
 * Tear down in the reverse order of probe: unregister from the CAN core
 * first (stops new activity), then disable runtime PM, remove the NAPI
 * context and free the netdev.  The order matters; do not reorder.
 *
 * Return: Always 0
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}
1604
/* Platform driver glue: binds via xcan_of_match, with PM callbacks from
 * xcan_dev_pm_ops.
 */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};
1614
/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");
1620