1
2
3
4
5
6
7
8
9
10
11
12#include <linux/clk.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include <linux/platform_device.h>
23#include <linux/skbuff.h>
24#include <linux/spinlock.h>
25#include <linux/string.h>
26#include <linux/types.h>
27#include <linux/can/dev.h>
28#include <linux/can/error.h>
29#include <linux/can/led.h>
30#include <linux/pm_runtime.h>
31
32#define DRIVER_NAME "xilinx_can"
33
34
/* CAN register set offsets from the peripheral base address.
 * Registers past XCAN_AFR_OFFSET only exist on the CAN FD variants.
 */
enum xcan_reg {
	XCAN_SRR_OFFSET = 0x00,		/* Software reset register */
	XCAN_MSR_OFFSET = 0x04,		/* Mode select register */
	XCAN_BRPR_OFFSET = 0x08,	/* Baud rate prescaler */
	XCAN_BTR_OFFSET = 0x0C,		/* Bit timing register */
	XCAN_ECR_OFFSET = 0x10,		/* Error counter register */
	XCAN_ESR_OFFSET = 0x14,		/* Error status register */
	XCAN_SR_OFFSET = 0x18,		/* Status register */
	XCAN_ISR_OFFSET = 0x1C,		/* Interrupt status register */
	XCAN_IER_OFFSET = 0x20,		/* Interrupt enable register */
	XCAN_ICR_OFFSET = 0x24,		/* Interrupt clear register */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET = 0x30,	/* TX FIFO base */
	XCAN_RXFIFO_OFFSET = 0x50,	/* RX FIFO base */
	XCAN_AFR_OFFSET = 0x60,		/* Acceptance filter register */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET = 0x088,	/* Data phase baud rate prescaler */

	/* Data phase bit timing register */
	XCAN_F_BTR_OFFSET = 0x08C,
	XCAN_TRR_OFFSET = 0x0090,	/* TX buffer ready request */
	XCAN_AFR_EXT_OFFSET = 0x00E0,	/* Acceptance filter extension */
	XCAN_FSR_OFFSET = 0x00E8,	/* RX FIFO status */
	XCAN_TXMSG_BASE_OFFSET = 0x0100,	/* TX message space */
	XCAN_RXMSG_BASE_OFFSET = 0x1100,	/* RX message space */
	XCAN_RXMSG_2_BASE_OFFSET = 0x2100,	/* RX message space (CAN FD v2.0) */
	XCAN_AFR_2_MASK_OFFSET = 0x0A00,	/* Acceptance filter mask */
	XCAN_AFR_2_ID_OFFSET = 0x0A04,	/* Acceptance filter ID */
};
66
/* Word offsets of the fields inside one TX/RX frame buffer */
#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

/* CAN FD frame buffers are 0x48 bytes apart in the message RAM */
#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
80
81
/* The TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Sync jump width (FD) */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 (FD) */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 (FD) */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO not empty */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO overflow */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* TX FIFO full */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard message ID */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ID */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX fill level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX fill level (v2.0) */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX read index increment */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX read index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX read index (v2.0) */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Sync jump width (FD) */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 (FD) */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard messg identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended message identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF	0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020
163
/* Supported variants of the Xilinx CAN IP core */
enum xcan_ip_type {
	XAXI_CAN = 0,	/* AXI CAN */
	XZYNQ_CANPS,	/* Zynq CAN PS */
	XAXI_CANFD,	/* AXI CAN FD 1.0 */
	XAXI_CANFD_2_0,	/* AXI CAN FD 2.0 */
};
170
/**
 * struct xcan_devtype_data - per-IP-variant configuration
 * @cantype:		IP core variant, see enum xcan_ip_type
 * @flags:		bitmask of XCAN_FLAG_* feature flags
 * @bittiming_const:	arbitration phase bit timing limits
 * @bus_clk_name:	name of the bus interface clock
 * @btr_ts2_shift:	TS2 field shift in the BTR register(s)
 * @btr_sjw_shift:	SJW field shift in the BTR register(s)
 */
struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
/**
 * struct xcan_priv - CAN driver instance private data
 * @can:	CAN private data structure (must be first member)
 * @tx_lock:	lock protecting @tx_head/@tx_tail and the TX-path registers
 * @tx_head:	count of frames written to HW (free-running, tx_head - tx_tail
 *		is the number of frames currently in flight)
 * @tx_tail:	count of frames completed by HW
 * @tx_max:	maximum number of frames the HW TX path can hold
 * @napi:	NAPI context used for RX polling
 * @read_reg:	register read accessor (LE or BE variant)
 * @write_reg:	register write accessor (LE or BE variant)
 * @dev:	backing struct device, used for PM runtime calls
 * @reg_base:	ioremapped register base address
 * @irq_flags:	flags passed to request_irq()
 * @bus_clk:	bus interface clock
 * @can_clk:	CAN core clock
 * @devtype:	copy of the matched device type data
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
214
215
/* Bit timing limits for the Zynq/AXI CAN (non-FD) cores */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
227
228
/* Arbitration phase bit timing limits for AXI CANFD 1.0 */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
240
241
242static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
243 .name = DRIVER_NAME,
244 .tseg1_min = 1,
245 .tseg1_max = 16,
246 .tseg2_min = 1,
247 .tseg2_max = 8,
248 .sjw_max = 8,
249 .brp_min = 1,
250 .brp_max = 256,
251 .brp_inc = 1,
252};
253
254
/* Arbitration phase bit timing limits for AXI CANFD 2.0 */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
266
267
268static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
269 .name = DRIVER_NAME,
270 .tseg1_min = 1,
271 .tseg1_max = 32,
272 .tseg2_min = 1,
273 .tseg2_max = 16,
274 .sjw_max = 16,
275 .brp_min = 1,
276 .brp_max = 256,
277 .brp_inc = 1,
278};
279
280
281
282
283
284
285
286
287
/**
 * xcan_write_reg_le - Write a value to the device register (little endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}
293
294
295
296
297
298
299
300
301
/**
 * xcan_read_reg_le - Read a value from the device register (little endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Return: value read from the register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}
306
307
308
309
310
311
312
313
314
/**
 * xcan_write_reg_be - Write a value to the device register (big endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}
320
321
322
323
324
325
326
327
328
/**
 * xcan_read_reg_be - Read a value from the device register (big endian)
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Return: value read from the register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}
333
334
335
336
337
338
339
340static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
341{
342
343
344
345 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
346 return XCAN_IXR_RXOK_MASK;
347 else
348 return XCAN_IXR_RXNEMP_MASK;
349}
350
351
352
353
354
355
356
357
358
359
/**
 * set_reset_mode - Put the controller into reset/configuration mode
 * @ndev:	Pointer to net_device structure
 *
 * Triggers a software reset and busy-waits (sleeping) until the core
 * reports configuration mode, up to XCAN_TIMEOUT.
 *
 * Return: 0 on success, -ETIMEDOUT if the core never reached config mode
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears the FIFOs, so reset the frame bookkeeping too */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}
382
383
384
385
386
387
388
389
/**
 * xcan_set_bittiming - Program BRPR/BTR (and the data phase registers on
 * CAN FD cores) from the bittiming computed by the CAN core
 * @ndev:	Pointer to net_device structure
 *
 * The controller must already be in configuration mode; the bit timing
 * registers are read-only otherwise.
 *
 * Return: 0 on success, -EPERM if the core is not in config mode
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
				XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}
448
449
450
451
452
453
454
455
456
457
458
459static int xcan_chip_start(struct net_device *ndev)
460{
461 struct xcan_priv *priv = netdev_priv(ndev);
462 u32 reg_msr;
463 int err;
464 u32 ier;
465
466
467 err = set_reset_mode(ndev);
468 if (err < 0)
469 return err;
470
471 err = xcan_set_bittiming(ndev);
472 if (err < 0)
473 return err;
474
475
476
477
478
479
480
481
482 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
483 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
484 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
485 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
486
487 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
488 ier |= XCAN_IXR_RXMNF_MASK;
489
490 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
491
492
493 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
494 reg_msr = XCAN_MSR_LBACK_MASK;
495 else
496 reg_msr = 0x0;
497
498
499
500
501 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
502 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
503
504 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
505 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
506
507 netdev_dbg(ndev, "status:#x%08x\n",
508 priv->read_reg(priv, XCAN_SR_OFFSET));
509
510 priv->can.state = CAN_STATE_ERROR_ACTIVE;
511 return 0;
512}
513
514
515
516
517
518
519
520
521
522
523
524static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
525{
526 int ret;
527
528 switch (mode) {
529 case CAN_MODE_START:
530 ret = xcan_chip_start(ndev);
531 if (ret < 0) {
532 netdev_err(ndev, "xcan_chip_start failed!\n");
533 return ret;
534 }
535 netif_wake_queue(ndev);
536 break;
537 default:
538 ret = -EOPNOTSUPP;
539 break;
540 }
541
542 return ret;
543}
544
545
546
547
548
549
550
/**
 * xcan_write_frame - Write a frame into the TX FIFO / mailbox registers
 * @ndev:	Pointer to net_device structure
 * @skb:	sk_buff pointer that contains the CAN(FD) frame to send
 * @frame_offset:	register base of the frame buffer to fill
 *
 * Converts the socket-buffer frame into the controller's ID/DLC/data
 * register layout, stores the echo skb and advances tx_head.
 * Caller must hold tx_lock when the FIFO accounting matters.
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	/* With TXFEMP-capable FIFO HW the echo slot rotates with tx_head;
	 * mailbox HW (and HW without TXFEMP) only ever has one frame in
	 * flight, so slot 0 is always used there.
	 */
	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
	else
		can_put_echo_skb(skb, ndev, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);

	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}
634
635
636
637
638
639
640
641
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be transmitted
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if the TX FIFO is full
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
668
669
670
671
672
673
674
675
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be transmitted
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space in the TX mailbox
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* TRR bit still set means the mailbox is still busy */
	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
699
700
701
702
703
704
705
706
707
708
709static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
710{
711 struct xcan_priv *priv = netdev_priv(ndev);
712 int ret;
713
714 if (can_dropped_invalid_skb(ndev, skb))
715 return NETDEV_TX_OK;
716
717 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
718 ret = xcan_start_xmit_mailbox(skb, ndev);
719 else
720 ret = xcan_start_xmit_fifo(skb, ndev);
721
722 if (ret < 0) {
723 netdev_err(ndev, "BUG!, TX full when queue awake!\n");
724 netif_stop_queue(ndev);
725 return NETDEV_TX_BUSY;
726 }
727
728 return NETDEV_TX_OK;
729}
730
731
732
733
734
735
736
737
738
739
740
741
/**
 * xcan_rx - Receive one classic CAN frame and pass it to the network stack
 * @ndev:	Pointer to net_device structure
 * @frame_base:	register base of the frame buffer to read
 *
 * Called from NAPI poll context.
 *
 * Return: 1 on success, 0 if the skb allocation failed (frame dropped)
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
				XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must be read prior to reading CANID register */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
799
800
801
802
803
804
805
806
807
808
809
810
/**
 * xcanfd_rx - Receive one CAN(FD) frame from the multi-buffer RX space
 * @ndev:	Pointer to net_device structure
 * @frame_base:	register base of the frame buffer to read
 *
 * Allocates a classic or FD skb depending on the EDL bit of the stored
 * frame. Called from NAPI poll context.
 *
 * Return: 1 on success, 0 if the skb allocation failed (frame dropped)
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
				  XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
					  XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		/* CAN FD frames cannot be remote frames; SRR only means
		 * RTR for classic frames
		 */
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check the frame received is FD or not*/
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}
	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
881
882
883
884
885
886
887
888
889
890
891
892
893static enum can_state xcan_current_error_state(struct net_device *ndev)
894{
895 struct xcan_priv *priv = netdev_priv(ndev);
896 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
897
898 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
899 return CAN_STATE_ERROR_PASSIVE;
900 else if (status & XCAN_SR_ERRWRN_MASK)
901 return CAN_STATE_ERROR_WARNING;
902 else
903 return CAN_STATE_ERROR_ACTIVE;
904}
905
906
907
908
909
910
911
912
913
914
/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to populate with the counters, or NULL
 *
 * Sets the device state and populates the error frame (if given) with
 * the TX/REC error counters. Only states up to ERROR_PASSIVE may be set
 * this way; bus-off is handled separately in the error interrupt.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}
937
938
939
940
941
942
943
944
945
/**
 * xcan_update_error_state_after_rxtx - Check for improved error state
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in warning or passive state, check if the device
 * error counters have decreased enough so that the state improves, and
 * report the improvement with an error frame if so. There is no
 * interrupt for a state improvement, hence this polling after RX/TX.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		/* state still gets updated even without an error skb */
		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb) {
			struct net_device_stats *stats = &ndev->stats;

			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}
	}
}
978
979
980
981
982
983
984
985
986
987
/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * Handles bus-off, state changes, arbitration loss, RX overflow, RX
 * match-not-finished and bus errors. Accumulates everything into one
 * local error frame and forwards a copy to the stack when anything was
 * flagged.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	/* ESR bits are write-1-to-clear */
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	/* Forward the accumulated error frame if anything was flagged */
	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			stats->rx_packets++;
			stats->rx_bytes += CAN_ERR_DLC;
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1118{
1119 struct xcan_priv *priv = netdev_priv(ndev);
1120
1121
1122 if (isr & XCAN_IXR_SLP_MASK)
1123 priv->can.state = CAN_STATE_SLEEPING;
1124
1125
1126 if (isr & XCAN_IXR_WKUP_MASK)
1127 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1128}
1129
1130
1131
1132
1133
1134
1135
/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: register base of the next frame to read, or -ENOENT if the
 * RX path has no pending frames
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that it can be
		 * reliably used to detect new frames afterwards
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* FSR layout differs between CAN FD 1.0 and 2.0 */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/**
 * xcan_rx_poll - NAPI poll routine
 * @napi:	NAPI structure pointer
 * @quota:	Max frames to be processed in one call
 *
 * Drains pending RX frames up to @quota, acknowledging each frame to
 * the HW, then re-enables the RX interrupt when work is done.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
1229
1230
1231
1232
1233
1234
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 *
 * Determines how many in-flight frames completed, frees their echo
 * skbs, updates stats and wakes the TX queue. The TXOK clear/re-read
 * dance compensates for the HW raising only a single TXOK per ISR read
 * even when several frames completed.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date without any TXOK from
		 *     the past (state of isr is irrelevant for the rest
		 *     of the handler)
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the frame in the
		 *     next run of this handler
		 * retries bounds the loop in case TXOK never settles.
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore - all frames completed */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer (the net_device)
 *
 * Dispatches TX-done, state-change and error conditions directly, and
 * defers RX to NAPI after masking the RX interrupt.
 *
 * Return: IRQ_NONE if no interrupt was pending, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
1361
1362
1363
1364
1365
1366
1367
1368
/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * Puts the controller back into reset mode and marks the device
 * stopped. The timeout in set_reset_mode() is intentionally ignored
 * here; there is nothing more to do if the HW does not respond.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Disable interrupts and leave the can in configuration mode */
	set_reset_mode(ndev);
	priv->can.state = CAN_STATE_STOPPED;
}
1377
1378
1379
1380
1381
1382
1383
1384
1385static int xcan_open(struct net_device *ndev)
1386{
1387 struct xcan_priv *priv = netdev_priv(ndev);
1388 int ret;
1389
1390 ret = pm_runtime_get_sync(priv->dev);
1391 if (ret < 0) {
1392 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1393 __func__, ret);
1394 return ret;
1395 }
1396
1397 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1398 ndev->name, ndev);
1399 if (ret < 0) {
1400 netdev_err(ndev, "irq allocation for CAN failed\n");
1401 goto err;
1402 }
1403
1404
1405 ret = set_reset_mode(ndev);
1406 if (ret < 0) {
1407 netdev_err(ndev, "mode resetting failed!\n");
1408 goto err_irq;
1409 }
1410
1411
1412 ret = open_candev(ndev);
1413 if (ret)
1414 goto err_irq;
1415
1416 ret = xcan_chip_start(ndev);
1417 if (ret < 0) {
1418 netdev_err(ndev, "xcan_chip_start failed!\n");
1419 goto err_candev;
1420 }
1421
1422 can_led_event(ndev, CAN_LED_EVENT_OPEN);
1423 napi_enable(&priv->napi);
1424 netif_start_queue(ndev);
1425
1426 return 0;
1427
1428err_candev:
1429 close_candev(ndev);
1430err_irq:
1431 free_irq(ndev->irq, ndev);
1432err:
1433 pm_runtime_put(priv->dev);
1434
1435 return ret;
1436}
1437
1438
1439
1440
1441
1442
1443
/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Stops the queue and NAPI, resets the chip, releases the IRQ and the
 * candev, then drops the PM runtime reference taken in xcan_open().
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468static int xcan_get_berr_counter(const struct net_device *ndev,
1469 struct can_berr_counter *bec)
1470{
1471 struct xcan_priv *priv = netdev_priv(ndev);
1472 int ret;
1473
1474 ret = pm_runtime_get_sync(priv->dev);
1475 if (ret < 0) {
1476 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1477 __func__, ret);
1478 return ret;
1479 }
1480
1481 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1482 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1483 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1484
1485 pm_runtime_put(priv->dev);
1486
1487 return 0;
1488}
1489
/* Standard netdev callbacks; MTU changes are delegated to the CAN core so
 * only the CAN/CANFD MTU values are accepted.
 */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
1496
1497
1498
1499
1500
1501
1502
1503
1504static int __maybe_unused xcan_suspend(struct device *dev)
1505{
1506 struct net_device *ndev = dev_get_drvdata(dev);
1507
1508 if (netif_running(ndev)) {
1509 netif_stop_queue(ndev);
1510 netif_device_detach(ndev);
1511 xcan_chip_stop(ndev);
1512 }
1513
1514 return pm_runtime_force_suspend(dev);
1515}
1516
1517
1518
1519
1520
1521
1522
1523
1524static int __maybe_unused xcan_resume(struct device *dev)
1525{
1526 struct net_device *ndev = dev_get_drvdata(dev);
1527 int ret;
1528
1529 ret = pm_runtime_force_resume(dev);
1530 if (ret) {
1531 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1532 return ret;
1533 }
1534
1535 if (netif_running(ndev)) {
1536 ret = xcan_chip_start(ndev);
1537 if (ret) {
1538 dev_err(dev, "xcan_chip_start failed on resume\n");
1539 return ret;
1540 }
1541
1542 netif_device_attach(ndev);
1543 netif_start_queue(ndev);
1544 }
1545
1546 return 0;
1547}
1548
1549
1550
1551
1552
1553
1554
1555
1556static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1557{
1558 struct net_device *ndev = dev_get_drvdata(dev);
1559 struct xcan_priv *priv = netdev_priv(ndev);
1560
1561 clk_disable_unprepare(priv->bus_clk);
1562 clk_disable_unprepare(priv->can_clk);
1563
1564 return 0;
1565}
1566
1567
1568
1569
1570
1571
1572
1573
1574static int __maybe_unused xcan_runtime_resume(struct device *dev)
1575{
1576 struct net_device *ndev = dev_get_drvdata(dev);
1577 struct xcan_priv *priv = netdev_priv(ndev);
1578 int ret;
1579
1580 ret = clk_prepare_enable(priv->bus_clk);
1581 if (ret) {
1582 dev_err(dev, "Cannot enable clock.\n");
1583 return ret;
1584 }
1585 ret = clk_prepare_enable(priv->can_clk);
1586 if (ret) {
1587 dev_err(dev, "Cannot enable clock.\n");
1588 clk_disable_unprepare(priv->bus_clk);
1589 return ret;
1590 }
1591
1592 return 0;
1593}
1594
/* System sleep (suspend/resume) and runtime-PM (clock gating) callbacks */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
1599
/* Zynq-7000 CAN-PS controller ("xlnx,zynq-can-1.0").
 * XCAN_FLAG_TXFEMP presumably marks cores with a usable TX-FIFO-empty
 * interrupt (enables 2-deep TX queueing in probe) — confirm against the
 * flag definition earlier in this file.
 */
static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};
1608
/* AXI CAN soft IP ("xlnx,axi-can-1.00.a"); no extra feature flags, so
 * probe limits the TX path to a single in-flight frame.
 */
static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};
1616
/* AXI CAN FD 1.0 ("xlnx,canfd-1.0"): extended acceptance filters, TX
 * mailboxes instead of a TX FIFO, and multi-buffer RX FIFO layout.
 * Exact flag semantics are defined earlier in this file.
 */
static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
1628
/* AXI CAN FD 2.0 ("xlnx,canfd-2.0"): as CAN FD 1.0 plus XCAN_FLAG_CANFD_2,
 * which gates the AFR_2 filter setup in probe and different bit timings.
 */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
1641
1642
/* Match table for OF platform binding; MODULE_DEVICE_TABLE exports it for
 * module autoloading.
 */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661static int xcan_probe(struct platform_device *pdev)
1662{
1663 struct net_device *ndev;
1664 struct xcan_priv *priv;
1665 const struct of_device_id *of_id;
1666 const struct xcan_devtype_data *devtype = &xcan_axi_data;
1667 void __iomem *addr;
1668 int ret;
1669 int rx_max, tx_max;
1670 int hw_tx_max, hw_rx_max;
1671 const char *hw_tx_max_property;
1672
1673
1674 addr = devm_platform_ioremap_resource(pdev, 0);
1675 if (IS_ERR(addr)) {
1676 ret = PTR_ERR(addr);
1677 goto err;
1678 }
1679
1680 of_id = of_match_device(xcan_of_match, &pdev->dev);
1681 if (of_id && of_id->data)
1682 devtype = of_id->data;
1683
1684 hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1685 "tx-mailbox-count" : "tx-fifo-depth";
1686
1687 ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1688 &hw_tx_max);
1689 if (ret < 0) {
1690 dev_err(&pdev->dev, "missing %s property\n",
1691 hw_tx_max_property);
1692 goto err;
1693 }
1694
1695 ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1696 &hw_rx_max);
1697 if (ret < 0) {
1698 dev_err(&pdev->dev,
1699 "missing rx-fifo-depth property (mailbox mode is not supported)\n");
1700 goto err;
1701 }
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721 if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1722 (devtype->flags & XCAN_FLAG_TXFEMP))
1723 tx_max = min(hw_tx_max, 2);
1724 else
1725 tx_max = 1;
1726
1727 rx_max = hw_rx_max;
1728
1729
1730 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1731 if (!ndev)
1732 return -ENOMEM;
1733
1734 priv = netdev_priv(ndev);
1735 priv->dev = &pdev->dev;
1736 priv->can.bittiming_const = devtype->bittiming_const;
1737 priv->can.do_set_mode = xcan_do_set_mode;
1738 priv->can.do_get_berr_counter = xcan_get_berr_counter;
1739 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1740 CAN_CTRLMODE_BERR_REPORTING;
1741
1742 if (devtype->cantype == XAXI_CANFD)
1743 priv->can.data_bittiming_const =
1744 &xcan_data_bittiming_const_canfd;
1745
1746 if (devtype->cantype == XAXI_CANFD_2_0)
1747 priv->can.data_bittiming_const =
1748 &xcan_data_bittiming_const_canfd2;
1749
1750 if (devtype->cantype == XAXI_CANFD ||
1751 devtype->cantype == XAXI_CANFD_2_0)
1752 priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1753
1754 priv->reg_base = addr;
1755 priv->tx_max = tx_max;
1756 priv->devtype = *devtype;
1757 spin_lock_init(&priv->tx_lock);
1758
1759
1760 ndev->irq = platform_get_irq(pdev, 0);
1761 ndev->flags |= IFF_ECHO;
1762
1763 platform_set_drvdata(pdev, ndev);
1764 SET_NETDEV_DEV(ndev, &pdev->dev);
1765 ndev->netdev_ops = &xcan_netdev_ops;
1766
1767
1768 priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1769 if (IS_ERR(priv->can_clk)) {
1770 if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
1771 dev_err(&pdev->dev, "Device clock not found.\n");
1772 ret = PTR_ERR(priv->can_clk);
1773 goto err_free;
1774 }
1775
1776 priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1777 if (IS_ERR(priv->bus_clk)) {
1778 if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
1779 dev_err(&pdev->dev, "bus clock not found\n");
1780 ret = PTR_ERR(priv->bus_clk);
1781 goto err_free;
1782 }
1783
1784 priv->write_reg = xcan_write_reg_le;
1785 priv->read_reg = xcan_read_reg_le;
1786
1787 pm_runtime_enable(&pdev->dev);
1788 ret = pm_runtime_get_sync(&pdev->dev);
1789 if (ret < 0) {
1790 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1791 __func__, ret);
1792 goto err_pmdisable;
1793 }
1794
1795 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1796 priv->write_reg = xcan_write_reg_be;
1797 priv->read_reg = xcan_read_reg_be;
1798 }
1799
1800 priv->can.clock.freq = clk_get_rate(priv->can_clk);
1801
1802 netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1803
1804 ret = register_candev(ndev);
1805 if (ret) {
1806 dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
1807 goto err_disableclks;
1808 }
1809
1810 devm_can_led_init(ndev);
1811
1812 pm_runtime_put(&pdev->dev);
1813
1814 if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
1815 priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
1816 priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
1817 }
1818
1819 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1820 priv->reg_base, ndev->irq, priv->can.clock.freq,
1821 hw_tx_max, priv->tx_max);
1822
1823 return 0;
1824
1825err_disableclks:
1826 pm_runtime_put(priv->dev);
1827err_pmdisable:
1828 pm_runtime_disable(&pdev->dev);
1829err_free:
1830 free_candev(ndev);
1831err:
1832 return ret;
1833}
1834
1835
1836
1837
1838
1839
1840
1841
/**
 * xcan_remove - Unbind the driver from the device
 * @pdev:	Handle to the platform device structure
 *
 * Unregisters the CAN device, disables runtime PM, removes the NAPI
 * context and frees the net_device allocated in probe.
 *
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}
1854
/* Platform driver glue: probe/remove plus PM ops and OF match table */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");
1870