1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/clk.h>
25#include <linux/errno.h>
26#include <linux/if_arp.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33#include <linux/rtnetlink.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/string.h>
37#include <linux/types.h>
38
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#include <mach/board.h>
43
/* Bitmask with the low (i) bits set, i.e. covering mailboxes 0..i-1. */
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
45
46
/* Common (non-mailbox) register offsets of the AT91 CAN controller. */
enum at91_reg {
	AT91_MR		= 0x000,	/* Mode Register */
	AT91_IER	= 0x004,	/* Interrupt Enable Register */
	AT91_IDR	= 0x008,	/* Interrupt Disable Register */
	AT91_IMR	= 0x00C,	/* Interrupt Mask Register */
	AT91_SR		= 0x010,	/* Status Register */
	AT91_BR		= 0x014,	/* Baudrate Register */
	AT91_TIM	= 0x018,	/* Timer Register */
	AT91_TIMESTP	= 0x01C,	/* Timestamp Register */
	AT91_ECR	= 0x020,	/* Error Counter Register */
	AT91_TCR	= 0x024,	/* Transfer Command Register */
	AT91_ACR	= 0x028,	/* Abort Command Register */
};
60
61
/*
 * Per-mailbox register offsets: mailbox (i) occupies a 0x20 byte
 * window starting at 0x200.
 */
#define AT91_MMR(i)		(enum at91_reg)(0x200 + ((i) * 0x20))	/* Mode */
#define AT91_MAM(i)		(enum at91_reg)(0x204 + ((i) * 0x20))	/* Acceptance Mask */
#define AT91_MID(i)		(enum at91_reg)(0x208 + ((i) * 0x20))	/* ID */
#define AT91_MFID(i)		(enum at91_reg)(0x20C + ((i) * 0x20))	/* Family ID */
#define AT91_MSR(i)		(enum at91_reg)(0x210 + ((i) * 0x20))	/* Status */
#define AT91_MDL(i)		(enum at91_reg)(0x214 + ((i) * 0x20))	/* Data Low */
#define AT91_MDH(i)		(enum at91_reg)(0x218 + ((i) * 0x20))	/* Data High */
#define AT91_MCR(i)		(enum at91_reg)(0x21C + ((i) * 0x20))	/* Control */
70
71
/* Mode Register bits */
#define AT91_MR_CANEN		BIT(0)	/* CAN controller enable */
#define AT91_MR_LPM		BIT(1)	/* low power mode */
#define AT91_MR_ABM		BIT(2)	/* autobaud/listen mode */
#define AT91_MR_OVL		BIT(3)	/* overload frame */
#define AT91_MR_TEOF		BIT(4)	/* timestamp at end of frame */
#define AT91_MR_TTM		BIT(5)	/* time triggered mode */
#define AT91_MR_TIMFRZ		BIT(6)	/* timer freeze */
#define AT91_MR_DRPT		BIT(7)	/* disable repeat */

#define AT91_SR_RBSY		BIT(29)	/* receiver busy */

#define AT91_MMR_PRIO_SHIFT	(16)	/* tx priority field in mailbox mode reg */

#define AT91_MID_MIDE		BIT(29)	/* extended (29 bit) identifier */

/* Mailbox Status Register bits */
#define AT91_MSR_MRTR		BIT(20)	/* received remote frame */
#define AT91_MSR_MABT		BIT(22)	/* transmission aborted */
#define AT91_MSR_MRDY		BIT(23)	/* mailbox ready */
#define AT91_MSR_MMI		BIT(24)	/* message ignored (overwrite) */

/* Mailbox Control Register bits */
#define AT91_MCR_MRTR		BIT(20)	/* send remote frame */
#define AT91_MCR_MTCR		BIT(23)	/* trigger transfer command */
94
95
/* Mailbox object type, written into the mailbox mode register (MMR). */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED	= 0,
	AT91_MB_MODE_RX		= 1,
	AT91_MB_MODE_RX_OVRWR	= 2,	/* rx with overwrite on overrun */
	AT91_MB_MODE_TX		= 3,
	AT91_MB_MODE_CONSUMER	= 4,
	AT91_MB_MODE_PRODUCER	= 5,
};
104
105
/*
 * Interrupt mask bits. Bits 0..15 are the per-mailbox interrupts,
 * bits 16..28 are the error and management interrupts below.
 */
#define AT91_IRQ_ERRA		(1 << 16)	/* error active */
#define AT91_IRQ_WARN		(1 << 17)	/* warning limit */
#define AT91_IRQ_ERRP		(1 << 18)	/* error passive */
#define AT91_IRQ_BOFF		(1 << 19)	/* bus off */
#define AT91_IRQ_SLEEP		(1 << 20)
#define AT91_IRQ_WAKEUP		(1 << 21)
#define AT91_IRQ_TOVF		(1 << 22)	/* timer overflow */
#define AT91_IRQ_TSTP		(1 << 23)	/* timestamp */
#define AT91_IRQ_CERR		(1 << 24)	/* CRC error */
#define AT91_IRQ_SERR		(1 << 25)	/* stuffing error */
#define AT91_IRQ_AERR		(1 << 26)	/* ack error */
#define AT91_IRQ_FERR		(1 << 27)	/* form error */
#define AT91_IRQ_BERR		(1 << 28)	/* bit error */

#define AT91_IRQ_ERR_ALL	(0x1fff0000)
#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
				 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
#define AT91_IRQ_ERR_LINE	(AT91_IRQ_ERRA | AT91_IRQ_WARN | \
				 AT91_IRQ_ERRP | AT91_IRQ_BOFF)

#define AT91_IRQ_ALL		(0x1fffffff)
127
/* Supported SoC variants; index into at91_devtype_data[]. */
enum at91_devtype {
	AT91_DEVTYPE_SAM9263,
	AT91_DEVTYPE_SAM9X5,
};
132
/* Per-SoC mailbox layout: which mailboxes serve rx, and how many tx. */
struct at91_devtype_data {
	unsigned int rx_first;	/* first rx mailbox */
	unsigned int rx_split;	/* boundary between "low" and "high" rx part */
	unsigned int rx_last;	/* last rx mailbox (used with overwrite) */
	unsigned int tx_shift;	/* log2 of the number of tx mailboxes */
	enum at91_devtype type;
};
140
/* Driver private data, embedded after struct can_priv. */
struct at91_priv {
	struct can_priv can;		/* must be the first member */
	struct net_device *dev;
	struct napi_struct napi;

	void __iomem *reg_base;

	u32 reg_sr;			/* SR snapshot saved by the irq handler for NAPI */
	unsigned int tx_next;		/* next tx mailbox+prio counter (see get_tx_next_mb) */
	unsigned int tx_echo;		/* oldest in-flight tx counter, echoed on completion */
	unsigned int rx_next;		/* next rx mailbox to scan in at91_poll_rx */
	struct at91_devtype_data devtype_data;

	struct clk *clk;
	struct at91_can_data *pdata;	/* board data; may carry transceiver_switch() */

	canid_t mb0_id;			/* CAN ID programmed into the disabled mailbox 0 */
};
159
/* Mailbox layout per SoC variant, copied into priv at probe time. */
static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
	[AT91_DEVTYPE_SAM9263] = {
		.rx_first = 1,	/* mailbox 0 stays disabled on sam9263 */
		.rx_split = 8,
		.rx_last = 11,
		.tx_shift = 2,	/* 4 tx mailboxes */
	},
	[AT91_DEVTYPE_SAM9X5] = {
		.rx_first = 0,
		.rx_split = 4,
		.rx_last = 5,
		.tx_shift = 1,	/* 2 tx mailboxes */
	},
};
174
/* Hardware bit-timing limits, consumed by the CAN core's timing calculation. */
static struct can_bittiming_const at91_bittiming_const = {
	.name		= KBUILD_MODNAME,
	.tseg1_min	= 4,
	.tseg1_max	= 16,
	.tseg2_min	= 2,
	.tseg2_max	= 8,
	.sjw_max	= 4,
	.brp_min	= 2,
	.brp_max	= 128,
	.brp_inc	= 1,
};
186
/* Generate at91_is_sam<model>() predicates testing the detected devtype. */
#define AT91_IS(_model) \
static inline int at91_is_sam##_model(const struct at91_priv *priv) \
{ \
	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}

AT91_IS(9263);
AT91_IS(9X5);
195
/* Accessors for the rx mailbox geometry of the detected SoC variant. */

static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_first;
}

static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_last;
}

static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_split;
}

/* Number of rx mailboxes (first..last inclusive). */
static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
}

/* Last mailbox of the "low" rx group (below the split point). */
static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
{
	return get_mb_rx_split(priv) - 1;
}

/* Bitmask of the "low" rx mailboxes: [rx_first, rx_split). */
static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_split(priv)) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
	return priv->devtype_data.tx_shift;
}

/* Number of tx mailboxes (a power of two). */
static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
{
	return 1 << get_mb_tx_shift(priv);
}
236
/*
 * Accessors for the tx mailbox range and for decoding priv->tx_next /
 * priv->tx_echo. Those counters encode the tx mailbox in their low
 * tx_shift bits and a 4 bit transmit priority above that.
 */

/* First tx mailbox: directly after the last rx mailbox. */
static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) + 1;
}

static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
{
	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}

/* Bit position of the priority field inside the tx_next counter. */
static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
{
	return get_mb_tx_shift(priv);
}

/* Mask of the 4 bit priority field inside the tx_next counter. */
static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
{
	return 0xf << get_mb_tx_shift(priv);
}

/* Mask of the mailbox field inside the tx_next counter. */
static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_shift(priv));
}

/* Combined mailbox+priority mask; counter wrap detection uses this. */
static inline unsigned int get_next_mask(const struct at91_priv *priv)
{
	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
}

/* Interrupt mask covering all rx mailboxes. */
static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

/* Interrupt mask covering all tx mailboxes. */
static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_tx_first(priv));
}

/* Hardware mailbox number for the next transmission. */
static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
{
	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}

/* Transmit priority for the next transmission. */
static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
{
	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
}

/* Hardware mailbox number of the oldest not-yet-echoed transmission. */
static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
{
	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}
293
/* Raw register accessors. */

static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return __raw_readl(priv->reg_base + reg);
}

static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
		u32 value)
{
	__raw_writel(value, priv->reg_base + reg);
}

/* Program a mailbox's object type and tx priority into its mode register. */
static inline void set_mb_mode_prio(const struct at91_priv *priv,
		unsigned int mb, enum at91_mb_mode mode, int prio)
{
	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}

static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
		enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}

/*
 * Convert a socketcan CAN ID into the mailbox ID register layout:
 * extended IDs go into bits 0..28 with MIDE set, standard IDs into
 * bits 18..28.
 */
static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
{
	u32 reg_mid;

	if (can_id & CAN_EFF_FLAG)
		reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
	else
		reg_mid = (can_id & CAN_SFF_MASK) << 18;

	return reg_mid;
}
328
329
330
331
/* Power the external CAN transceiver on/off via the board callback, if any. */
static void at91_transceiver_switch(const struct at91_priv *priv, int on)
{
	if (priv->pdata && priv->pdata->transceiver_switch)
		priv->pdata->transceiver_switch(on);
}
337
/*
 * Program the mailbox layout: mailboxes below rx_first are disabled
 * (but get a valid ID and a cleared control register), the next
 * mailboxes form the reception FIFO with the last one in overwrite
 * mode (its MMI flag then signals an overflow), and the remaining
 * mailboxes are set up for transmission. Also resets the driver's
 * tx/rx ring pointers.
 */
static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/*
	 * Mailboxes below rx_first stay disabled; program them with the
	 * (sysfs configurable) mb0_id and clear their control register.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < get_mb_rx_first(priv); i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
	}

	/* rx mailboxes; the last one overwrites on overrun */
	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The remaining mailboxes are used for transmitting. */
	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx and rx helper pointers */
	priv->tx_next = priv->tx_echo = 0;
	priv->rx_next = get_mb_rx_first(priv);
}
376
/*
 * Write the bit timing computed by the CAN core into the baudrate
 * register. Field layout: SMP(24) | BRP(16) | SJW(12) | PROPAG(8) |
 * PHASE1(4) | PHASE2(0), each stored as value-1.
 */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br;

	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
		((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
		((bt->phase_seg2 - 1) << 0);

	netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);

	at91_write(priv, AT91_BR, reg_br);

	return 0;
}
394
/*
 * Report the rx/tx bus error counters. ECR layout: REC in bits 0..7,
 * TEC in bits 16 and up.
 */
static int at91_get_berr_counter(const struct net_device *dev,
		struct can_berr_counter *bec)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_ecr = at91_read(priv, AT91_ECR);

	bec->rxerr = reg_ecr & 0xff;
	bec->txerr = reg_ecr >> 16;

	return 0;
}
406
/*
 * Bring the controller up: disable interrupts and the core, program
 * bit timing and mailboxes, power the transceiver, enable the core
 * and finally enable the rx and frame-error interrupts.
 */
static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable chip while (re-)configuring */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_set_bittiming(dev);
	at91_setup_mailboxes(dev);
	at91_transceiver_switch(priv, 1);

	/* enable chip */
	at91_write(priv, AT91_MR, AT91_MR_CANEN);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Enable interrupts */
	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
	at91_write(priv, AT91_IER, reg_ier);
}
433
/*
 * Shut the controller down: mask all interrupts, clear the enable bit,
 * power down the transceiver and record the new CAN state.
 */
static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_transceiver_switch(priv, 0);
	priv->can.state = state;
}
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
/*
 * Queue a CAN frame for transmission.
 *
 * priv->tx_next acts as a combined counter: its low tx_shift bits
 * select the tx mailbox (offset by get_mb_tx_first()), the 4 bits
 * above carry the transmit priority written into the mailbox mode
 * register, so hardware keeps frames in submission order even though
 * several mailboxes are in flight.
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the chosen mailbox is
 * unexpectedly still busy (queue should have been stopped earlier).
 */
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	mb = get_tx_next_mb(priv);
	prio = get_tx_next_prio(priv);

	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);

		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
		(cf->can_dlc << 16) | AT91_MCR_MTCR;

	/* disable MB while writing ID to avoid a spurious match */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	stats->tx_bytes += cf->can_dlc;

	/* echo index is the mailbox offset within the tx range */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));

	/*
	 * Stop the queue if the next mailbox is still busy (not MRDY),
	 * or on a mailbox+priority counter wrap-around: in that case
	 * all pending frames must complete before new ones may be
	 * queued, otherwise priority ordering would be violated.
	 */
	priv->tx_next++;
	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_next & get_next_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}
531
532
533
534
535
536
537
/*
 * Re-arm all mailboxes of the "low" rx group at once by writing their
 * mask to the Transfer Command Register.
 */
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	u32 mask = get_mb_rx_low_mask(priv);
	at91_write(priv, AT91_TCR, mask);
}
543
544
545
546
547
548
549
550
/* Re-arm a single rx mailbox via the Transfer Command Register. */
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
		unsigned int mb)
{
	u32 mask = 1 << mb;
	at91_write(priv, AT91_TCR, mask);
}
557
558
559
560
561
/*
 * Account an rx overrun and deliver a CAN error frame carrying the
 * CAN_ERR_CRTL_RX_OVERFLOW indication to user space.
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
583
584
585
586
587
588
589
590
591
592
/*
 * Read one received frame out of mailbox @mb into @cf, then reset the
 * mailbox ID register. If this is the overwrite mailbox and its MMI
 * flag is set, a frame was lost and an overflow error is reported.
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
		struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	/* dlc lives in MSR bits 16..19 */
	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);

	if (reg_msr & AT91_MSR_MRTR)
		cf->can_id |= CAN_RTR_FLAG;
	else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}
621
622
623
624
625
626
627
628
629
/*
 * Allocate an skb, fill it from mailbox @mb and pass it up the stack,
 * updating rx statistics. Drops (and counts) the frame on allocation
 * failure.
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698static int at91_poll_rx(struct net_device *dev, int quota)
699{
700 struct at91_priv *priv = netdev_priv(dev);
701 u32 reg_sr = at91_read(priv, AT91_SR);
702 const unsigned long *addr = (unsigned long *)®_sr;
703 unsigned int mb;
704 int received = 0;
705
706 if (priv->rx_next > get_mb_rx_low_last(priv) &&
707 reg_sr & get_mb_rx_low_mask(priv))
708 netdev_info(dev,
709 "order of incoming frames cannot be guaranteed\n");
710
711 again:
712 for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
713 mb < get_mb_tx_first(priv) && quota > 0;
714 reg_sr = at91_read(priv, AT91_SR),
715 mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
716 at91_read_msg(dev, mb);
717
718
719 if (mb == get_mb_rx_low_last(priv))
720
721 at91_activate_rx_low(priv);
722 else if (mb > get_mb_rx_low_last(priv))
723
724 at91_activate_rx_mb(priv, mb);
725
726 received++;
727 quota--;
728 }
729
730
731 if (priv->rx_next > get_mb_rx_low_last(priv) &&
732 quota > 0 && mb > get_mb_rx_last(priv)) {
733 priv->rx_next = get_mb_rx_first(priv);
734 goto again;
735 }
736
737 return received;
738}
739
/*
 * Translate the frame-error bits of @reg_sr into CAN error frame
 * contents in @cf and update the error statistics.
 */
static void at91_poll_err_frame(struct net_device *dev,
		struct can_frame *cf, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);

	/* CRC error */
	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	}

	/* Stuffing Error */
	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "SERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	/* Acknowledgement Error */
	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "AERR irq\n");
		dev->stats.tx_errors++;
		cf->can_id |= CAN_ERR_ACK;
	}

	/* Form error */
	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "FERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* Bit Error */
	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "BERR irq\n");
		dev->stats.tx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_BIT;
	}
}
787
/*
 * Deliver one CAN error frame describing the frame errors in @reg_sr.
 * Returns the number of frames delivered (0 or 1), bounded by @quota.
 */
static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	if (quota == 0)
		return 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	at91_poll_err_frame(dev, cf, reg_sr);
	netif_receive_skb(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	return 1;
}
808
/*
 * NAPI poll handler: process received frames and frame errors, then,
 * if quota was not exhausted, complete NAPI and re-enable the rx and
 * frame-error interrupts (excluding rx mailboxes below rx_next, which
 * are re-enabled once the scan wraps).
 */
static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	/*
	 * Include the SR snapshot saved by the interrupt handler: the
	 * error bits are cleared on read, so they would be lost
	 * otherwise.
	 */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* enable interrupts again */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;
		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete(napi);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}
838
839
840
841
842
843
844
845
846
847
848
849
850
/*
 * Handle tx-completion interrupts: walk the in-flight transmissions
 * from tx_echo to tx_next in order, echo each completed (MRDY and not
 * aborted) skb back to the stack and disable that mailbox's
 * interrupt. Wakes the queue when more frames may be submitted.
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/*
		 * Only echo if the mailbox signals a successful
		 * transmission (MRDY set, MABT clear); an aborted
		 * frame is silently dropped from the echo stack.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* _NOTE_: subtract mb_tx_first offset from mb! */
			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
			dev->stats.tx_packets++;
		}
	}

	/*
	 * Restart the queue if we have not reached the counter
	 * wrap-around, or if all pending frames completed after a
	 * wrap (tx_echo back at a counter multiple of the full mask).
	 */
	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
	    (priv->tx_echo & get_next_mask(priv)) == 0)
		netif_wake_queue(dev);
}
893
/*
 * Handle a CAN state transition: fill @cf with the matching error
 * frame contents (based on the old state in priv->can.state and the
 * @new_state), update statistics and reprogram the line-error
 * interrupt mask so only the next possible transitions raise irqs.
 * On bus-off without automatic restart the chip is stopped.
 */
static void at91_irq_err_state(struct net_device *dev,
		struct can_frame *cf, enum can_state new_state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_idr = 0, reg_ier = 0;
	struct can_berr_counter bec;

	at91_get_berr_counter(dev, &bec);

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
		/* fall through */
	case CAN_STATE_ERROR_WARNING:
		/*
		 * from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		/*
		 * from: BUS_OFF
		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
		 * =>  : the controller restarted
		 */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
			cf->can_id |= CAN_ERR_RESTARTED;

			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;

			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
		break;
	default:
		break;
	}

	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * actually we want an ERROR_ACTIVE error frame plus
		 * the WARNING irq setup below
		 */
		netdev_dbg(dev, "Error Active\n");
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		/* fall through */
	case CAN_STATE_ERROR_WARNING:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = AT91_IRQ_ERRP;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
		reg_ier = AT91_IRQ_BOFF;
		break;
	case CAN_STATE_BUS_OFF:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
			AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = 0;

		cf->can_id |= CAN_ERR_BUSOFF;

		netdev_dbg(dev, "bus-off\n");
		netif_carrier_off(dev);
		priv->can.can_stats.bus_off++;

		/* turn off chip, if restart is disabled */
		if (!priv->can.restart_ms) {
			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
			return;
		}
		break;
	default:
		break;
	}

	at91_write(priv, AT91_IDR, reg_idr);
	at91_write(priv, AT91_IER, reg_ier);
}
1001
1002static int at91_get_state_by_bec(const struct net_device *dev,
1003 enum can_state *state)
1004{
1005 struct can_berr_counter bec;
1006 int err;
1007
1008 err = at91_get_berr_counter(dev, &bec);
1009 if (err)
1010 return err;
1011
1012 if (bec.txerr < 96 && bec.rxerr < 96)
1013 *state = CAN_STATE_ERROR_ACTIVE;
1014 else if (bec.txerr < 128 && bec.rxerr < 128)
1015 *state = CAN_STATE_ERROR_WARNING;
1016 else if (bec.txerr < 256 && bec.rxerr < 256)
1017 *state = CAN_STATE_ERROR_PASSIVE;
1018 else
1019 *state = CAN_STATE_BUS_OFF;
1020
1021 return 0;
1022}
1023
1024
/*
 * Detect CAN state changes and report them as error frames. On
 * sam9263 the state is read directly from the status register; on
 * other variants it is derived from the error counters.
 */
static void at91_irq_err(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf;
	enum can_state new_state;
	u32 reg_sr;
	int err;

	if (at91_is_sam9263(priv)) {
		reg_sr = at91_read(priv, AT91_SR);

		/* we need to look at the unmasked reg_sr */
		if (unlikely(reg_sr & AT91_IRQ_BOFF))
			new_state = CAN_STATE_BUS_OFF;
		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (unlikely(reg_sr & AT91_IRQ_WARN))
			new_state = CAN_STATE_ERROR_WARNING;
		else if (likely(reg_sr & AT91_IRQ_ERRA))
			new_state = CAN_STATE_ERROR_ACTIVE;
		else {
			netdev_err(dev, "BUG! hardware in undefined state\n");
			return;
		}
	} else {
		err = at91_get_state_by_bec(dev, &new_state);
		if (err)
			return;
	}

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	at91_irq_err_state(dev, cf, new_state);
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	priv->can.state = new_state;
}
1072
1073
1074
1075
/*
 * Interrupt handler: rx and frame-error interrupts are masked and
 * deferred to NAPI (the SR snapshot is saved because the error bits
 * clear on read), tx completions and line errors are handled
 * directly.
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* Ignore masked interrupts */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* Receive or error interrupt? -> napi */
	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
		/*
		 * The error bits are clear on read, save them for
		 * later use in the NAPI handler.
		 */
		priv->reg_sr = reg_sr;
		at91_write(priv, AT91_IDR,
			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
		napi_schedule(&priv->napi);
	}

	/* Transmission complete interrupt */
	if (reg_sr & get_irq_mb_tx(priv))
		at91_irq_tx(dev, reg_sr);

	at91_irq_err(dev);

 exit:
	return handled;
}
1114
/*
 * ndo_open: enable the clock, open the CAN device, request the irq,
 * start the chip, NAPI and the tx queue. Undoes everything on
 * failure.
 */
static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	clk_enable(priv->clk);

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out;

	/* register interrupt handler */
	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
			dev->name, dev)) {
		err = -EAGAIN;
		goto out_close;
	}

	/* start chip and queuing */
	at91_chip_start(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_close:
	close_candev(dev);
 out:
	clk_disable(priv->clk);

	return err;
}
1148
1149
1150
1151
/*
 * ndo_stop: stop the queue, NAPI and the chip, then release the irq,
 * clock and CAN device.
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}
1167
1168static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1169{
1170 switch (mode) {
1171 case CAN_MODE_START:
1172 at91_chip_start(dev);
1173 netif_wake_queue(dev);
1174 break;
1175
1176 default:
1177 return -EOPNOTSUPP;
1178 }
1179
1180 return 0;
1181}
1182
/* netdev callbacks */
static const struct net_device_ops at91_netdev_ops = {
	.ndo_open	= at91_open,
	.ndo_stop	= at91_close,
	.ndo_start_xmit	= at91_start_xmit,
};
1188
/*
 * sysfs "mb0_id" show: print the configured mailbox-0 CAN ID,
 * 8 hex digits for extended IDs, 3 for standard ones.
 */
static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct at91_priv *priv = netdev_priv(to_net_dev(dev));

	if (priv->mb0_id & CAN_EFF_FLAG)
		return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
	else
		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}
1199
1200static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1201 struct device_attribute *attr, const char *buf, size_t count)
1202{
1203 struct net_device *ndev = to_net_dev(dev);
1204 struct at91_priv *priv = netdev_priv(ndev);
1205 unsigned long can_id;
1206 ssize_t ret;
1207 int err;
1208
1209 rtnl_lock();
1210
1211 if (ndev->flags & IFF_UP) {
1212 ret = -EBUSY;
1213 goto out;
1214 }
1215
1216 err = strict_strtoul(buf, 0, &can_id);
1217 if (err) {
1218 ret = err;
1219 goto out;
1220 }
1221
1222 if (can_id & CAN_EFF_FLAG)
1223 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1224 else
1225 can_id &= CAN_SFF_MASK;
1226
1227 priv->mb0_id = can_id;
1228 ret = count;
1229
1230 out:
1231 rtnl_unlock();
1232 return ret;
1233}
1234
/* sysfs attribute group exposing "mb0_id" (sam9263 only, see probe). */
static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
	NULL,
};

static struct attribute_group at91_sysfs_attr_group = {
	.attrs = at91_sysfs_attrs,
};
1246
/*
 * Platform probe: acquire clock, MMIO region and irq, allocate the
 * CAN netdevice with one echo slot per tx mailbox, fill in the
 * private data from the matched devtype and register the device.
 * All resources are released in reverse order on failure.
 */
static int __devinit at91_can_probe(struct platform_device *pdev)
{
	const struct at91_devtype_data *devtype_data;
	enum at91_devtype devtype;
	struct net_device *dev;
	struct at91_priv *priv;
	struct resource *res;
	struct clk *clk;
	void __iomem *addr;
	int err, irq;

	devtype = pdev->id_entry->driver_data;
	devtype_data = &at91_devtype_data[devtype];

	clk = clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = -ENODEV;
		goto exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq <= 0) {
		err = -ENODEV;
		goto exit_put;
	}

	if (!request_mem_region(res->start,
				resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto exit_put;
	}

	addr = ioremap_nocache(res->start, resource_size(res));
	if (!addr) {
		err = -ENOMEM;
		goto exit_release;
	}

	/* one echo skb per tx mailbox */
	dev = alloc_candev(sizeof(struct at91_priv),
			   1 << devtype_data->tx_shift);
	if (!dev) {
		err = -ENOMEM;
		goto exit_iounmap;
	}

	dev->netdev_ops	= &at91_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO;

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &at91_bittiming_const;
	priv->can.do_set_mode = at91_set_mode;
	priv->can.do_get_berr_counter = at91_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
	priv->dev = dev;
	priv->reg_base = addr;
	priv->devtype_data = *devtype_data;
	priv->devtype_data.type = devtype;
	priv->clk = clk;
	priv->pdata = pdev->dev.platform_data;
	priv->mb0_id = 0x7ff;	/* default ID for the disabled mailbox 0 */

	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));

	/* the mb0_id attribute only makes sense where mailbox 0 is disabled */
	if (at91_is_sam9263(priv))
		dev->sysfs_groups[0] = &at91_sysfs_attr_group;

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto exit_free;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->reg_base, dev->irq);

	return 0;

 exit_free:
	free_candev(dev);
 exit_iounmap:
	iounmap(addr);
 exit_release:
	release_mem_region(res->start, resource_size(res));
 exit_put:
	clk_put(clk);
 exit:
	return err;
}
1343
/* Platform remove: tear down in reverse order of probe. */
static int __devexit at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	free_candev(dev);

	return 0;
}
1365
/* Platform device name -> devtype mapping. */
static const struct platform_device_id at91_can_id_table[] = {
	{
		.name = "at91_can",
		.driver_data = AT91_DEVTYPE_SAM9263,
	}, {
		.name = "at91sam9x5_can",
		.driver_data = AT91_DEVTYPE_SAM9X5,
	}, {
		/* sentinel */
	}
};
1377
static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove = __devexit_p(at91_can_remove),
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.id_table = at91_can_id_table,
};
1387
/* Module (un)registration boilerplate. */
static int __init at91_can_module_init(void)
{
	return platform_driver_register(&at91_can_driver);
}

static void __exit at91_can_module_exit(void)
{
	platform_driver_unregister(&at91_can_driver);
}

module_init(at91_can_module_init);
module_exit(at91_can_module_exit);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
1404