/*
 * Blackfin On-Chip CAN Driver
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>

#include <asm/bfin_can.h>
#include <asm/portmux.h>

#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
#define TX_ECHO_SKB_MAX 1

/*
 * bfin can private data
 */
struct bfin_can_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	void __iomem *membase;
	int rx_irq;
	int tx_irq;
	int err_irq;
	unsigned short *pin_list;
};

/*
 * bfin can timing parameters
 */
static struct can_bittiming_const bfin_can_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	/*
	 * Although the BRP field can be set to any value, it is recommended
	 * that the value be greater than or equal to 4, as restrictions
	 * apply to the bit-timing configuration when BRP is less than 4.
	 */
	.brp_min = 4,
	.brp_max = 1024,
	.brp_inc = 1,
};

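/*
 * The bit timing computed by the CAN core is written into two registers:
 * CLOCK holds the prescaler (BRP - 1) and TIMING packs the segments as
 * ((SJW - 1) << 8) | ((TSEG2 - 1) << 4) | (TSEG1 - 1), where TSEG1 is
 * prop_seg + phase_seg1.  For example (illustrative values only), brp = 4,
 * sjw = 1, prop_seg + phase_seg1 = 7 and phase_seg2 = 2 yield
 * CLOCK = 0x0003 and TIMING = 0x0016.
 */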
static int bfin_can_set_bittiming(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_bittiming *bt = &priv->can.bittiming;
	u16 clk, timing;

	clk = bt->brp - 1;
	timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
		((bt->phase_seg2 - 1) << 4);

	/*
	 * If the SAM bit is set, the input signal is oversampled three times
	 * at the SCLK rate.
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		timing |= SAM;

	bfin_write(&reg->clock, clk);
	bfin_write(&reg->timing, timing);

	dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
			clk, timing);

	return 0;
}

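/*
 * Enter configuration (reset) mode and set up the mailboxes: all mailboxes
 * are first disabled, two mailboxes starting at RECEIVE_STD_CHL are
 * initialized for standard-ID reception and two starting at RECEIVE_EXT_CHL
 * for extended-ID reception, with the acceptance mask registers written to
 * all ones.  Finally the first mailbox of each receive pair and the
 * TRANSMIT_CHL mailbox are enabled.
 */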
static void bfin_can_set_reset_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;
	int i;

	/* disable interrupts */
	bfin_write(&reg->mbim1, 0);
	bfin_write(&reg->mbim2, 0);
	bfin_write(&reg->gim, 0);

	/* reset the CAN core and request configuration mode */
	bfin_write(&reg->control, SRS | CCR);
	SSYNC();
	bfin_write(&reg->control, CCR);
	SSYNC();
	while (!(bfin_read(&reg->control) & CCA)) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to enter configuration mode\n");
			BUG();
		}
	}

	/*
	 * All mailboxes are marked as inactive by clearing the mailbox
	 * configuration registers MC1 and MC2
	 * (0 - mailbox disabled, 1 - mailbox enabled).
	 */
	bfin_write(&reg->mc1, 0);
	bfin_write(&reg->mc2, 0);

	/* set the mailbox direction: 0..15 receive, 16..31 transmit */
	bfin_write(&reg->md1, 0xFFFF);
	bfin_write(&reg->md2, 0);

	/* set up two mailboxes for standard-ID reception */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
	}

	/* set up two mailboxes for extended-ID reception */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
	}

	/* enable the transmit mailbox and the two receive mailboxes */
	bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
	bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	SSYNC();

	priv->can.state = CAN_STATE_STOPPED;
}

static void bfin_can_set_normal_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	/*
	 * leave configuration mode
	 */
	bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);

	while (bfin_read(&reg->status) & CCA) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to leave configuration mode\n");
			BUG();
		}
	}

	/*
	 * clear all pending tx and rx interrupts
	 */
	bfin_write(&reg->mbtif1, 0xFFFF);
	bfin_write(&reg->mbtif2, 0xFFFF);
	bfin_write(&reg->mbrif1, 0xFFFF);
	bfin_write(&reg->mbrif2, 0xFFFF);

	/*
	 * clear the global interrupt status register
	 */
	bfin_write(&reg->gis, 0x7FF);

	/*
	 * initialize interrupts:
	 * - enable the receive and transmit mailboxes in the mailbox
	 *   interrupt mask registers
	 * - enable error passive, bus off and receive-message-lost
	 *   interrupts in the global interrupt mask
	 */
	bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));

	bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
	SSYNC();
}

static void bfin_can_start(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	/* enter reset mode */
	if (priv->can.state != CAN_STATE_STOPPED)
		bfin_can_set_reset_mode(dev);

	/* leave reset mode */
	bfin_can_set_normal_mode(dev);
}

static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		bfin_can_start(dev);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

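/*
 * Transmit path: the single TRANSMIT_CHL mailbox is loaded with the frame
 * id, the payload (packed two bytes per 16-bit data register, last byte
 * first) and the data length code; the skb is put on the echo queue and
 * the transmit request bit is set.  The netif queue stays stopped until
 * the TX-complete interrupt wakes it again.
 */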
static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 dlc = cf->can_dlc;
	canid_t id = cf->can_id;
	u8 *data = cf->data;
	u16 val;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);

	/* fill id */
	if (id & CAN_EFF_FLAG) {
		bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
		val = ((id & 0x1FFF0000) >> 16) | IDE;
	} else {
		val = (id << 2);
	}
	if (id & CAN_RTR_FLAG)
		val |= RTR;
	bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);

	/* fill payload */
	for (i = 0; i < 8; i += 2) {
		val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
			((6 - i) < dlc ? (data[6 - i] << 8) : 0);
		bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
	}

	/* fill data length code */
	bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);

	can_put_echo_skb(skb, dev, 0);

	/* set transmit request */
	bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));

	return NETDEV_TX_OK;
}

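/*
 * Receive path: isrc indicates which receive mailbox raised the interrupt.
 * Extended frames carry the upper 13 id bits in id1 and the lower 16 bits
 * in id0; standard frames keep the 11-bit id in bits 2..12 of id1.  The
 * payload is read back in the same reversed two-bytes-per-register layout
 * used on transmit.
 */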
static void bfin_can_rx(struct net_device *dev, u16 isrc)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf;
	struct sk_buff *skb;
	int obj;
	int i;
	u16 val;

	skb = alloc_can_skb(dev, &cf);
	if (skb == NULL)
		return;

	/* get id */
	if (isrc & BIT(RECEIVE_EXT_CHL)) {
		/* extended frame format (EFF) */
		cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
				& 0x1FFF) << 16)
				+ bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
		cf->can_id |= CAN_EFF_FLAG;
		obj = RECEIVE_EXT_CHL;
	} else {
		/* standard frame format (SFF) */
		cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
				& 0x1ffc) >> 2;
		obj = RECEIVE_STD_CHL;
	}
	if (bfin_read(&reg->chl[obj].id1) & RTR)
		cf->can_id |= CAN_RTR_FLAG;

	/* get data length code */
	cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);

	/* get payload */
	for (i = 0; i < 8; i += 2) {
		val = bfin_read(&reg->chl[obj].data[i]);
		cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
	}

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

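/*
 * Error handling: RMLIS is reported as an RX overflow, BOIS switches the
 * state to bus-off, EPIS to error-passive and EWTIS/EWRIS to error-warning.
 * On entering the warning or passive state the CEC register (rx error count
 * in the low byte, tx error count in the high byte) decides whether the tx
 * or rx side is flagged.  A non-zero ESR value is reported as a bus error,
 * with the protocol error type derived from the BEF/FER/SER bits.
 */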
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(dev, &cf);
	if (skb == NULL)
		return -ENOMEM;

	if (isrc & RMLIS) {
		/* receive message lost: report as RX overflow */
		dev_dbg(dev->dev.parent, "data overrun interrupt\n");
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	}

	if (isrc & BOIS) {
		dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
	}

	if (isrc & EPIS) {
		dev_dbg(dev->dev.parent, "error passive interrupt\n");
		state = CAN_STATE_ERROR_PASSIVE;
	}

	if ((isrc & EWTIS) || (isrc & EWRIS)) {
		dev_dbg(dev->dev.parent,
				"Error Warning Transmit/Receive Interrupt\n");
		state = CAN_STATE_ERROR_WARNING;
	}

	if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
				state == CAN_STATE_ERROR_PASSIVE)) {
		u16 cec = bfin_read(&reg->cec);
		u8 rxerr = cec;
		u8 txerr = cec >> 8;

		cf->can_id |= CAN_ERR_CRTL;
		if (state == CAN_STATE_ERROR_WARNING) {
			priv->can.can_stats.error_warning++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		} else {
			priv->can.can_stats.error_passive++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
	}

	if (status) {
		priv->can.can_stats.bus_error++;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		if (status & BEF)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		else if (status & FER)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		else if (status & SER)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		else
			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
	}

	priv->can.state = state;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 0;
}

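/*
 * A single handler services all three interrupt lines; the irq number it
 * was invoked with selects between TX-complete, RX and error handling.
 */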
static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	u16 status, isrc;

	if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
		/* transmission complete interrupt */
		bfin_write(&reg->mbtif2, 0xFFFF);
		stats->tx_packets++;
		stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
		can_get_echo_skb(dev, 0);
		netif_wake_queue(dev);
	} else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
		/* receive interrupt */
		isrc = bfin_read(&reg->mbrif1);
		bfin_write(&reg->mbrif1, 0xFFFF);
		bfin_can_rx(dev, isrc);
	} else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
		/* error interrupt */
		isrc = bfin_read(&reg->gis);
		status = bfin_read(&reg->esr);
		bfin_write(&reg->gis, 0x7FF);
		bfin_can_err(dev, isrc, status);
	} else {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int bfin_can_open(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	int err;

	/* set chip into reset mode */
	bfin_can_set_reset_mode(dev);

	/* common open */
	err = open_candev(dev);
	if (err)
		goto exit_open;

	/* register interrupt handlers for the rx, tx and error lines */
	err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
			"bfin-can-rx", dev);
	if (err)
		goto exit_rx_irq;
	err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
			"bfin-can-tx", dev);
	if (err)
		goto exit_tx_irq;
	err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
			"bfin-can-err", dev);
	if (err)
		goto exit_err_irq;

	bfin_can_start(dev);

	netif_start_queue(dev);

	return 0;

exit_err_irq:
	free_irq(priv->tx_irq, dev);
exit_tx_irq:
	free_irq(priv->rx_irq, dev);
exit_rx_irq:
	close_candev(dev);
exit_open:
	return err;
}

static int bfin_can_close(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	bfin_can_set_reset_mode(dev);

	close_candev(dev);

	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);
	free_irq(priv->err_irq, dev);

	return 0;
}

static struct net_device *alloc_bfin_candev(void)
{
	struct net_device *dev;
	struct bfin_can_priv *priv;

	dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->can.bittiming_const = &bfin_can_bittiming_const;
	priv->can.do_set_bittiming = bfin_can_set_bittiming;
	priv->can.do_set_mode = bfin_can_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	return dev;
}

static const struct net_device_ops bfin_can_netdev_ops = {
	.ndo_open = bfin_can_open,
	.ndo_stop = bfin_can_close,
	.ndo_start_xmit = bfin_can_start_xmit,
};

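/*
 * Probe expects one memory resource for the register block, three IRQ
 * resources (rx, tx and error, in that order) and a peripheral pin list
 * passed via platform data for peripheral_request_list().
 */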
static int __devinit bfin_can_probe(struct platform_device *pdev)
{
	int err;
	struct net_device *dev;
	struct bfin_can_priv *priv;
	struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
	unsigned short *pdata;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data provided!\n");
		err = -EINVAL;
		goto exit;
	}

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
		err = -EINVAL;
		goto exit;
	}

	if (!request_mem_region(res_mem->start, resource_size(res_mem),
				dev_name(&pdev->dev))) {
		err = -EBUSY;
		goto exit;
	}

	/* request the CAN peripheral pins listed in the platform data */
	err = peripheral_request_list(pdata, dev_name(&pdev->dev));
	if (err)
		goto exit_mem_release;

	dev = alloc_bfin_candev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_peri_pin_free;
	}

	priv = netdev_priv(dev);
	priv->membase = (void __iomem *)res_mem->start;
	priv->rx_irq = rx_irq->start;
	priv->tx_irq = tx_irq->start;
	priv->err_irq = err_irq->start;
	priv->pin_list = pdata;
	priv->can.clock.freq = get_sclk();

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &bfin_can_netdev_ops;

	bfin_can_set_reset_mode(dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
		goto exit_candev_free;
	}

	dev_info(&pdev->dev,
		"%s device registered "
		"(reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
		DRV_NAME, (void *)priv->membase, priv->rx_irq,
		priv->tx_irq, priv->err_irq, priv->can.clock.freq);
	return 0;

exit_candev_free:
	free_candev(dev);
exit_peri_pin_free:
	peripheral_free_list(pdata);
exit_mem_release:
	release_mem_region(res_mem->start, resource_size(res_mem));
exit:
	return err;
}

static int __devexit bfin_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct resource *res;

	bfin_can_set_reset_mode(dev);

	unregister_candev(dev);

	dev_set_drvdata(&pdev->dev, NULL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	peripheral_free_list(priv->pin_list);

	free_candev(dev);
	return 0;
}

#ifdef CONFIG_PM
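/*
 * Suspend requests sleep mode by setting SMR and waits for SMACK in the
 * interrupt register; resume clears the interrupt register to bring the
 * controller back out of sleep mode.
 */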
static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	if (netif_running(dev)) {
		bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
		SSYNC();
		while (!(bfin_read(&reg->intr) & SMACK)) {
			udelay(10);
			if (--timeout == 0) {
				dev_err(dev->dev.parent,
						"fail to enter sleep mode\n");
				BUG();
			}
		}
	}

	return 0;
}

static int bfin_can_resume(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;

	if (netif_running(dev)) {
		bfin_write(&reg->intr, 0);
		SSYNC();
	}

	return 0;
}
#else
#define bfin_can_suspend NULL
#define bfin_can_resume NULL
#endif

static struct platform_driver bfin_can_driver = {
	.probe = bfin_can_probe,
	.remove = __devexit_p(bfin_can_remove),
	.suspend = bfin_can_suspend,
	.resume = bfin_can_resume,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init bfin_can_init(void)
{
	return platform_driver_register(&bfin_can_driver);
}
module_init(bfin_can_init);

static void __exit bfin_can_exit(void)
{
	platform_driver_unregister(&bfin_can_driver);
}
module_exit(bfin_can_exit);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");