1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/clk.h>
25#include <linux/errno.h>
26#include <linux/if_arp.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/string.h>
36#include <linux/types.h>
37
38#include <linux/can.h>
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#include <mach/board.h>
43
#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12

/*
 * Mailbox layout: the controller's mailboxes are split into a receive
 * group (the first AT91_MB_RX_NUM) and a transmit group of
 * AT91_MB_TX_NUM (= 1 << AT91_MB_TX_SHIFT) mailboxes following it.
 */
#define AT91_MB_RX_NUM 12
#define AT91_MB_TX_SHIFT 2

#define AT91_MB_RX_FIRST 0
#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)

/* bitmask covering the first (i) RX mailboxes */
#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
/*
 * RX mailboxes are handled as two banks split at AT91_MB_RX_SPLIT; the
 * lower bank is re-armed as a whole once its last mailbox was read
 * (see at91_activate_rx_low()).
 */
#define AT91_MB_RX_SPLIT 8
#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))

#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)

/*
 * tx_next/tx_echo counter layout: the low AT91_MB_TX_SHIFT bits select
 * the TX mailbox, the next four bits carry the transmit priority.
 */
#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
70
71
/* common (non-mailbox) register offsets relative to reg_base */
enum at91_reg {
	AT91_MR = 0x000,	/* mode register (CANEN etc.) */
	AT91_IER = 0x004,	/* interrupt enable */
	AT91_IDR = 0x008,	/* interrupt disable */
	AT91_IMR = 0x00C,	/* interrupt mask */
	AT91_SR = 0x010,	/* status register */
	AT91_BR = 0x014,	/* baudrate / bit-timing register */
	AT91_TIM = 0x018,	/* timer */
	AT91_TIMESTP = 0x01C,	/* timestamp */
	AT91_ECR = 0x020,	/* error counter register (rec/tec) */
	AT91_TCR = 0x024,	/* transfer command register */
	AT91_ACR = 0x028,	/* abort command register */
};

/* per-mailbox registers: mailbox (i) occupies 0x20 bytes at 0x200 + i * 0x20 */
#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
95
/* AT91_MR (mode register) bits */
#define AT91_MR_CANEN BIT(0)
#define AT91_MR_LPM BIT(1)
#define AT91_MR_ABM BIT(2)
#define AT91_MR_OVL BIT(3)
#define AT91_MR_TEOF BIT(4)
#define AT91_MR_TTM BIT(5)
#define AT91_MR_TIMFRZ BIT(6)
#define AT91_MR_DRPT BIT(7)

#define AT91_SR_RBSY BIT(29)

/* position of the transmit priority field inside AT91_MMR */
#define AT91_MMR_PRIO_SHIFT (16)

/* extended-frame (29 bit identifier) flag in AT91_MID */
#define AT91_MID_MIDE BIT(29)

/* AT91_MSR (mailbox status register) bits */
#define AT91_MSR_MRTR BIT(20)	/* remote transmission request */
#define AT91_MSR_MABT BIT(22)	/* message aborted */
#define AT91_MSR_MRDY BIT(23)	/* mailbox ready */
#define AT91_MSR_MMI BIT(24)	/* message was overwritten */

/* AT91_MCR (mailbox control register) bits */
#define AT91_MCR_MRTR BIT(20)
#define AT91_MCR_MTCR BIT(23)	/* trigger transfer */

/* mailbox mode field written into AT91_MMR by set_mb_mode_prio() */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED = 0,
	AT91_MB_MODE_RX = 1,
	AT91_MB_MODE_RX_OVRWR = 2,	/* RX with overwrite of unread frames */
	AT91_MB_MODE_TX = 3,
	AT91_MB_MODE_CONSUMER = 4,
	AT91_MB_MODE_PRODUCER = 5,
};
129
/*
 * Interrupt sources: bits 0..15 are the per-mailbox interrupts, bits
 * 16..28 the error/line interrupts.  The masks below group them for
 * the RX and TX mailbox ranges defined above.
 */
#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
	- (1 << AT91_MB_RX_FIRST))
#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
	- (1 << AT91_MB_TX_FIRST))
#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)

#define AT91_IRQ_ERRA (1 << 16)	/* error active */
#define AT91_IRQ_WARN (1 << 17)	/* warning limit */
#define AT91_IRQ_ERRP (1 << 18)	/* error passive */
#define AT91_IRQ_BOFF (1 << 19)	/* bus off */
#define AT91_IRQ_SLEEP (1 << 20)
#define AT91_IRQ_WAKEUP (1 << 21)
#define AT91_IRQ_TOVF (1 << 22)
#define AT91_IRQ_TSTP (1 << 23)
#define AT91_IRQ_CERR (1 << 24)	/* CRC error */
#define AT91_IRQ_SERR (1 << 25)	/* stuffing error */
#define AT91_IRQ_AERR (1 << 26)	/* acknowledgment error */
#define AT91_IRQ_FERR (1 << 27)	/* form error */
#define AT91_IRQ_BERR (1 << 28)	/* bit error */

#define AT91_IRQ_ERR_ALL (0x1fff0000)
/* frame-level errors, reported via at91_poll_err() */
#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
	AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
/* controller state transitions, handled by at91_irq_err() */
#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
	AT91_IRQ_ERRP | AT91_IRQ_BOFF)

#define AT91_IRQ_ALL (0x1fffffff)
158
/* per-device private data, embedded in the net_device by alloc_candev() */
struct at91_priv {
	struct can_priv can;		/* common CAN private data; kept first
					 * for the candev helpers */
	struct net_device *dev;
	struct napi_struct napi;

	void __iomem *reg_base;		/* ioremapped controller registers */

	u32 reg_sr;			/* SR bits saved by the hard IRQ for
					 * the NAPI poll handler */
	unsigned int tx_next;		/* next free TX mailbox/prio counter
					 * (layout: AT91_NEXT_*) */
	unsigned int tx_echo;		/* oldest in-flight TX frame, for the
					 * echo/loopback bookkeeping */
	unsigned int rx_next;		/* next RX mailbox to read; preserves
					 * frame order across polls */

	struct clk *clk;
	struct at91_can_data *pdata;	/* board data, e.g. transceiver
					 * switch callback */
};
174
/* bit-timing limits of the AT91 CAN core, consumed by the CAN framework
 * when computing/validating a bitrate configuration */
static struct can_bittiming_const at91_bittiming_const = {
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 2,
	.brp_max = 128,
	.brp_inc = 1,
};
185
186static inline int get_tx_next_mb(const struct at91_priv *priv)
187{
188 return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
189}
190
191static inline int get_tx_next_prio(const struct at91_priv *priv)
192{
193 return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
194}
195
196static inline int get_tx_echo_mb(const struct at91_priv *priv)
197{
198 return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
199}
200
/* Read a 32-bit controller register. */
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return readl(priv->reg_base + reg);
}
205
/* Write a 32-bit controller register. */
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
		u32 value)
{
	writel(value, priv->reg_base + reg);
}
211
212static inline void set_mb_mode_prio(const struct at91_priv *priv,
213 unsigned int mb, enum at91_mb_mode mode, int prio)
214{
215 at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
216}
217
/* Program mailbox @mb's mode with the default (lowest) priority 0. */
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
		enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}
223
/*
 * Allocate an skb sized for one CAN frame and hand back a pointer to
 * the frame payload via @cf.  Returns NULL on allocation failure.
 */
static struct sk_buff *alloc_can_skb(struct net_device *dev,
		struct can_frame **cf)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
	if (unlikely(!skb))
		return NULL;

	skb->protocol = htons(ETH_P_CAN);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* reserve room for the frame and expose it to the caller */
	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));

	return skb;
}
239
/*
 * Allocate an skb pre-initialized as a CAN error frame (CAN_ERR_FLAG
 * set, payload zeroed).  Returns NULL on allocation failure.
 */
static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
		struct can_frame **cf)
{
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, cf);
	if (unlikely(!skb))
		return NULL;

	memset(*cf, 0, sizeof(struct can_frame));
	(*cf)->can_id = CAN_ERR_FLAG;
	(*cf)->can_dlc = CAN_ERR_DLC;

	return skb;
}
255
256
257
258
259static void at91_transceiver_switch(const struct at91_priv *priv, int on)
260{
261 if (priv->pdata && priv->pdata->transceiver_switch)
262 priv->pdata->transceiver_switch(on);
263}
264
/*
 * Program all mailboxes for their role and reset the driver's ring
 * counters.  The receive group uses plain RX mode except for the very
 * last RX mailbox, which runs in overwrite mode so that a full RX
 * group loses only the newest frame instead of stalling (the MMI bit
 * then flags the overflow, see at91_read_mb()).
 */
static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* receive mailboxes; note the loop deliberately stops before
	 * AT91_MB_RX_LAST, which is set up separately below */
	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);

	/* transmit mailboxes, lowest priority for now */
	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* restart the TX/RX ring bookkeeping */
	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
}
286
/*
 * Translate the framework-computed bit timing into the AT91_BR
 * register layout (SMP at bit 24, BRP at 16, SJW at 12, PROPAG at 8,
 * PHASE1 at 4, PHASE2 at 0 — each field stored as value - 1).
 * Callback for priv->can.do_set_bittiming; always succeeds.
 */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br;

	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
		((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
		((bt->phase_seg2 - 1) << 0);

	dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);

	at91_write(priv, AT91_BR, reg_br);

	return 0;
}
304
/*
 * Bring the controller into operation: disable the chip and all
 * interrupts, program the mailboxes, enable the transceiver, switch
 * the CAN core on, and finally enable the RX/error interrupts.
 */
static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts while reconfiguring */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable the CAN core before touching the mailboxes */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_setup_mailboxes(dev);
	at91_transceiver_switch(priv, 1);

	/* enable the chip */
	at91_write(priv, AT91_MR, AT91_MR_CANEN);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* enable RX mailbox, error-passive and frame-error interrupts;
	 * TX mailbox interrupts are enabled per frame in at91_start_xmit() */
	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
	at91_write(priv, AT91_IER, reg_ier);
}
330
/*
 * Stop the controller: mask all interrupts, disable the CAN core,
 * power down the transceiver and record the new CAN state.
 */
static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_transceiver_switch(priv, 0);
	priv->can.state = state;
}
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
/*
 * Hard-start-xmit handler: place one CAN frame into the next free TX
 * mailbox and trigger transmission.
 *
 * The TX mailboxes are used as a ring driven by priv->tx_next; the
 * priority field embedded in tx_next decreases effective arbitration
 * priority as the counter wraps, which keeps the hardware transmitting
 * the frames in the order they were queued.  The register write order
 * below (disable mailbox -> set ID -> re-enable as TX -> data -> MCR)
 * matters: the mailbox must not be armed while its ID is changed.
 */
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	mb = get_tx_next_mb(priv);
	prio = get_tx_next_prio(priv);

	/* We stop the queue below whenever the next mailbox is busy, so
	 * getting here with a non-ready mailbox indicates a flow-control
	 * bug, not a normal condition. */
	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);

		dev_err(dev->dev.parent,
			"BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* build the mailbox ID register: 29 bit extended or 11 bit
	 * standard identifier (standard IDs live at bit 18) */
	if (cf->can_id & CAN_EFF_FLAG)
		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	else
		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;

	/* RTR flag, data length code and the transfer trigger bit */
	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
		(cf->can_dlc << 16) | AT91_MCR_MTCR;

	/* disable mailbox while updating its identifier */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* writing MCR with MTCR set starts the transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	/* NOTE(review): tx_bytes is counted here, i.e. before the frame
	 * is known to have left the wire — frames dropped on abort are
	 * still counted. */
	stats->tx_bytes += cf->can_dlc;
	dev->trans_start = jiffies;

	/* keep the skb for the TX-complete echo (index = ring slot) */
	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);

	/* advance the ring; stop the queue if the next mailbox is still
	 * busy or the priority counter wrapped (all prios consumed) */
	priv->tx_next++;
	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_next & AT91_NEXT_MASK) == 0)
		netif_stop_queue(dev);

	/* enable the TX-done interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}
432
433
434
435
436
437
438
439static inline void at91_activate_rx_low(const struct at91_priv *priv)
440{
441 u32 mask = AT91_MB_RX_LOW_MASK;
442 at91_write(priv, AT91_TCR, mask);
443}
444
445
446
447
448
449
450
451
452static inline void at91_activate_rx_mb(const struct at91_priv *priv,
453 unsigned int mb)
454{
455 u32 mask = 1 << mb;
456 at91_write(priv, AT91_TCR, mask);
457}
458
459
460
461
462
/*
 * Account an RX overrun and, if possible, forward a controller error
 * frame (CAN_ERR_CRTL_RX_OVERFLOW) to the stack.  Called from NAPI
 * context, hence netif_receive_skb().
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	dev_dbg(dev->dev.parent, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
484
485
486
487
488
489
490
491
492
493
/*
 * Copy one received frame out of mailbox @mb into @cf: identifier
 * (extended or standard, standard IDs sit at bit 18 of MID), RTR flag,
 * DLC (clamped to 8) and the 8 data bytes.  If the overwrite mailbox
 * (the last RX mailbox) reports MMI, a frame was lost and the overflow
 * is reported via at91_rx_overflow_err().
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
		struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	if (reg_msr & AT91_MSR_MRTR)
		cf->can_id |= CAN_RTR_FLAG;
	/* DLC lives in MSR bits 16..19; clamp defensively to 8 */
	cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);

	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));

	/* MMI on the overwrite mailbox means an unread frame was lost */
	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}
517
518
519
520
521
522
523
524
525
/*
 * Read one frame from mailbox @mb and push it up the stack; accounts
 * rx_dropped when no skb can be allocated.  NAPI context.
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589static int at91_poll_rx(struct net_device *dev, int quota)
590{
591 struct at91_priv *priv = netdev_priv(dev);
592 u32 reg_sr = at91_read(priv, AT91_SR);
593 const unsigned long *addr = (unsigned long *)®_sr;
594 unsigned int mb;
595 int received = 0;
596
597 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
598 reg_sr & AT91_MB_RX_LOW_MASK)
599 dev_info(dev->dev.parent,
600 "order of incoming frames cannot be guaranteed\n");
601
602 again:
603 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
604 mb < AT91_MB_RX_NUM && quota > 0;
605 reg_sr = at91_read(priv, AT91_SR),
606 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
607 at91_read_msg(dev, mb);
608
609
610 if (mb == AT91_MB_RX_LOW_LAST)
611
612 at91_activate_rx_low(priv);
613 else if (mb > AT91_MB_RX_LOW_LAST)
614
615 at91_activate_rx_mb(priv, mb);
616
617 received++;
618 quota--;
619 }
620
621
622 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
623 quota > 0 && mb >= AT91_MB_RX_NUM) {
624 priv->rx_next = 0;
625 goto again;
626 }
627
628 return received;
629}
630
/*
 * Translate the frame-error bits of @reg_sr into a CAN error frame in
 * @cf and update the corresponding statistics.  Several bits may be
 * set at once; each one is handled independently.
 */
static void at91_poll_err_frame(struct net_device *dev,
		struct can_frame *cf, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);

	/* CRC error */
	if (reg_sr & AT91_IRQ_CERR) {
		dev_dbg(dev->dev.parent, "CERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	}

	/* stuffing error */
	if (reg_sr & AT91_IRQ_SERR) {
		dev_dbg(dev->dev.parent, "SERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	/* acknowledgment error */
	if (reg_sr & AT91_IRQ_AERR) {
		dev_dbg(dev->dev.parent, "AERR irq\n");
		dev->stats.tx_errors++;
		cf->can_id |= CAN_ERR_ACK;
	}

	/* form error */
	if (reg_sr & AT91_IRQ_FERR) {
		dev_dbg(dev->dev.parent, "FERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* bit error */
	if (reg_sr & AT91_IRQ_BERR) {
		dev_dbg(dev->dev.parent, "BERR irq\n");
		dev->stats.tx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_BIT;
	}
}
678
/*
 * Build and deliver one CAN error frame for the frame-error bits in
 * @reg_sr.  Returns the number of frames delivered (0 or 1) so the
 * caller can account it against the NAPI quota.
 */
static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	if (quota == 0)
		return 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	at91_poll_err_frame(dev, cf, reg_sr);
	netif_receive_skb(skb);

	dev->last_rx = jiffies;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	return 1;
}
700
/*
 * NAPI poll handler: process received frames first, then any pending
 * frame errors (including those latched into priv->reg_sr by the hard
 * interrupt handler).  When the quota was not exhausted, polling is
 * complete and the RX/error interrupts are re-enabled — except for RX
 * mailboxes below rx_next, which must stay masked to preserve frame
 * ordering until the scan wraps around.
 */
static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & AT91_IRQ_MB_RX)
		work_done += at91_poll_rx(dev, quota - work_done);

	/* merge in the error bits saved by at91_irq(); they were
	 * acknowledged by reading SR in the hard IRQ and would
	 * otherwise be lost */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* re-enable interrupts, keeping already-handled RX
		 * mailboxes masked */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;
		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);

		napi_complete(napi);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}
730
731
732
733
734
735
736
737
738
739
740
741
742
/*
 * TX-complete interrupt handling: walk the echo ring from tx_echo to
 * tx_next, releasing every mailbox whose TX-done bit is set in
 * @reg_sr, delivering the echo skb and accounting the packet.  Wakes
 * the queue again once a mailbox/priority slot is available.
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* process in-flight frames oldest first */
	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event for this mailbox yet: stop, later frames
		 * cannot have completed before earlier ones */
		if (!(reg_sr & (1 << mb)))
			break;

		/* disable the per-mailbox interrupt again (it was
		 * enabled in at91_start_xmit) */
		at91_write(priv, AT91_IDR, 1 << mb);

		/* only deliver the echo skb if the frame really went
		 * out, i.e. mailbox ready and not aborted */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* TX-complete: hand back the echoed frame */
			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
			dev->stats.tx_packets++;
		}
	}

	/* restart queue: either the current priority window still has
	 * free slots, or all pending frames completed and the window
	 * wrapped */
	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
		netif_wake_queue(dev);
}
785
/*
 * Handle a CAN controller state transition from priv->can.state to
 * @new_state: fill @cf with the matching error-frame contents, update
 * statistics and reprogram the line-error interrupt enables so only
 * the next possible transitions fire.
 *
 * The first switch intentionally falls through: a jump from e.g.
 * ERROR_ACTIVE straight to ERROR_PASSIVE must account both the
 * warning and the passive transition.
 */
static void at91_irq_err_state(struct net_device *dev,
		struct can_frame *cf, enum can_state new_state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_idr, reg_ier, reg_ecr;
	u8 tec, rec;

	/* error counters: RX counter in the low byte, TX at bit 16 */
	reg_ecr = at91_read(priv, AT91_ECR);
	rec = reg_ecr & 0xff;
	tec = reg_ecr >> 16;

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* entering (at least) the warning state */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (tec > rec) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
		/* fall through */
	case CAN_STATE_ERROR_WARNING:
		/* entering (at least) the passive state */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (tec > rec) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		/* recovering from bus-off back to an operational state */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
			cf->can_id |= CAN_ERR_RESTARTED;

			dev_dbg(dev->dev.parent, "restarted\n");
			priv->can.can_stats.restarts++;

			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
		break;
	default:
		break;
	}

	/* reprogram the line-error interrupts for the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* back to error-active: report it, then arm the same
		 * interrupt set as for the warning state */
		dev_dbg(dev->dev.parent, "Error Active\n");
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		/* fall through */
	case CAN_STATE_ERROR_WARNING:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = AT91_IRQ_ERRP;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
		reg_ier = AT91_IRQ_BOFF;
		break;
	case CAN_STATE_BUS_OFF:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
			AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = 0;

		cf->can_id |= CAN_ERR_BUSOFF;

		dev_dbg(dev->dev.parent, "bus-off\n");
		netif_carrier_off(dev);
		priv->can.can_stats.bus_off++;

		/* without automatic restart, shut the chip down here;
		 * interrupts are already masked by at91_chip_stop() */
		if (!priv->can.restart_ms) {
			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
			return;
		}
		break;
	default:
		break;
	}

	at91_write(priv, AT91_IDR, reg_idr);
	at91_write(priv, AT91_IER, reg_ier);
}
895
/*
 * Derive the controller's CAN state from the line-error status bits
 * and, on a state change, emit a matching error frame via netif_rx()
 * (hard-IRQ context) and record the new state.
 */
static void at91_irq_err(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf;
	enum can_state new_state;
	u32 reg_sr;

	reg_sr = at91_read(priv, AT91_SR);

	/* map the status bits to a state, worst state first */
	if (unlikely(reg_sr & AT91_IRQ_BOFF))
		new_state = CAN_STATE_BUS_OFF;
	else if (unlikely(reg_sr & AT91_IRQ_ERRP))
		new_state = CAN_STATE_ERROR_PASSIVE;
	else if (unlikely(reg_sr & AT91_IRQ_WARN))
		new_state = CAN_STATE_ERROR_WARNING;
	else if (likely(reg_sr & AT91_IRQ_ERRA))
		new_state = CAN_STATE_ERROR_ACTIVE;
	else {
		dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
		return;
	}

	/* nothing to do when the state did not change */
	if (likely(new_state == priv->can.state))
		return;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	at91_irq_err_state(dev, cf, new_state);
	netif_rx(skb);

	dev->last_rx = jiffies;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	priv->can.state = new_state;
}
937
938
939
940
/*
 * Hard interrupt handler.  RX and frame-error work is deferred to NAPI
 * (the relevant interrupt sources are masked until the poll finishes);
 * TX completion and line-error state changes are handled right here.
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* only care about the interrupt sources currently enabled;
	 * on a shared IRQ line this may legitimately be none */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* RX frames and frame errors: schedule NAPI */
	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
		/* save the status bits for the poller — reading SR
		 * above already acknowledged them — and mask the
		 * sources until at91_poll() re-enables them */
		priv->reg_sr = reg_sr;
		at91_write(priv, AT91_IDR,
			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
		napi_schedule(&priv->napi);
	}

	/* TX-complete handling */
	if (reg_sr & AT91_IRQ_MB_TX)
		at91_irq_tx(dev, reg_sr);

	at91_irq_err(dev);

 exit:
	return handled;
}
979
980static int at91_open(struct net_device *dev)
981{
982 struct at91_priv *priv = netdev_priv(dev);
983 int err;
984
985 clk_enable(priv->clk);
986
987
988 err = open_candev(dev);
989 if (err)
990 goto out;
991
992
993 if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
994 dev->name, dev)) {
995 err = -EAGAIN;
996 goto out_close;
997 }
998
999
1000 at91_chip_start(dev);
1001 napi_enable(&priv->napi);
1002 netif_start_queue(dev);
1003
1004 return 0;
1005
1006 out_close:
1007 close_candev(dev);
1008 out:
1009 clk_disable(priv->clk);
1010
1011 return err;
1012}
1013
1014
1015
1016
/*
 * ndo_stop callback: tear down in reverse order of at91_open() —
 * stop queue and NAPI, halt the chip, free the IRQ, gate the clock
 * and close the candev.
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}
1032
1033static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1034{
1035 switch (mode) {
1036 case CAN_MODE_START:
1037 at91_chip_start(dev);
1038 netif_wake_queue(dev);
1039 break;
1040
1041 default:
1042 return -EOPNOTSUPP;
1043 }
1044
1045 return 0;
1046}
1047
/* net_device callbacks for this driver */
static const struct net_device_ops at91_netdev_ops = {
	.ndo_open = at91_open,
	.ndo_stop = at91_close,
	.ndo_start_xmit = at91_start_xmit,
};
1053
1054static int __init at91_can_probe(struct platform_device *pdev)
1055{
1056 struct net_device *dev;
1057 struct at91_priv *priv;
1058 struct resource *res;
1059 struct clk *clk;
1060 void __iomem *addr;
1061 int err, irq;
1062
1063 clk = clk_get(&pdev->dev, "can_clk");
1064 if (IS_ERR(clk)) {
1065 dev_err(&pdev->dev, "no clock defined\n");
1066 err = -ENODEV;
1067 goto exit;
1068 }
1069
1070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1071 irq = platform_get_irq(pdev, 0);
1072 if (!res || !irq) {
1073 err = -ENODEV;
1074 goto exit_put;
1075 }
1076
1077 if (!request_mem_region(res->start,
1078 resource_size(res),
1079 pdev->name)) {
1080 err = -EBUSY;
1081 goto exit_put;
1082 }
1083
1084 addr = ioremap_nocache(res->start, resource_size(res));
1085 if (!addr) {
1086 err = -ENOMEM;
1087 goto exit_release;
1088 }
1089
1090 dev = alloc_candev(sizeof(struct at91_priv));
1091 if (!dev) {
1092 err = -ENOMEM;
1093 goto exit_iounmap;
1094 }
1095
1096 dev->netdev_ops = &at91_netdev_ops;
1097 dev->irq = irq;
1098 dev->flags |= IFF_ECHO;
1099
1100 priv = netdev_priv(dev);
1101 priv->can.clock.freq = clk_get_rate(clk);
1102 priv->can.bittiming_const = &at91_bittiming_const;
1103 priv->can.do_set_bittiming = at91_set_bittiming;
1104 priv->can.do_set_mode = at91_set_mode;
1105 priv->reg_base = addr;
1106 priv->dev = dev;
1107 priv->clk = clk;
1108 priv->pdata = pdev->dev.platform_data;
1109
1110 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1111
1112 dev_set_drvdata(&pdev->dev, dev);
1113 SET_NETDEV_DEV(dev, &pdev->dev);
1114
1115 err = register_candev(dev);
1116 if (err) {
1117 dev_err(&pdev->dev, "registering netdev failed\n");
1118 goto exit_free;
1119 }
1120
1121 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1122 priv->reg_base, dev->irq);
1123
1124 return 0;
1125
1126 exit_free:
1127 free_netdev(dev);
1128 exit_iounmap:
1129 iounmap(addr);
1130 exit_release:
1131 release_mem_region(res->start, resource_size(res));
1132 exit_put:
1133 clk_put(clk);
1134 exit:
1135 return err;
1136}
1137
/*
 * Remove: unregister the netdev and release the resources acquired in
 * probe (reverse order).
 */
static int __devexit at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	free_netdev(dev);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	return 0;
}
1159
/* platform driver glue; matched against the "at91_can" platform device */
static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove = __devexit_p(at91_can_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
1168
1169static int __init at91_can_module_init(void)
1170{
1171 printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
1172 return platform_driver_register(&at91_can_driver);
1173}
1174
1175static void __exit at91_can_module_exit(void)
1176{
1177 platform_driver_unregister(&at91_can_driver);
1178 printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
1179}
1180
1181module_init(at91_can_module_init);
1182module_exit(at91_can_module_exit);
1183
1184MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1185MODULE_LICENSE("GPL v2");
1186MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
1187