/*
 * Blackfin on-chip CAN netdevice driver
 *
 * Author: Barry Song <21cnbao@gmail.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>

#include <asm/bfin_can.h>
#include <asm/portmux.h>
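
/*
 * Note: the register layout (struct bfin_can_regs), the mailbox channel
 * indices (TRANSMIT_CHL, RECEIVE_STD_CHL, RECEIVE_EXT_CHL) and the register
 * bit names used below (CCR, CCA, SRS, SAM, AME, IDE, RTR, SMR, SMACK, ...)
 * are not defined in this file; they are expected to come from the Blackfin
 * headers included above, primarily <asm/bfin_can.h>.
 */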

#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100
#define TX_ECHO_SKB_MAX 1

/*
 * bfin can private data
 */
struct bfin_can_priv {
        struct can_priv can;    /* must be the first member */
        struct net_device *dev;
        void __iomem *membase;
        int rx_irq;
        int tx_irq;
        int err_irq;
        unsigned short *pin_list;
};

/*
 * bit-timing limits advertised to the CAN core
 */
static const struct can_bittiming_const bfin_can_bittiming_const = {
        .name = DRV_NAME,
        .tseg1_min = 1,
        .tseg1_max = 16,
        .tseg2_min = 1,
        .tseg2_max = 8,
        .sjw_max = 4,
        /*
         * the baud rate prescaler is restricted to the range 4..1024;
         * values below brp_min are not offered to the CAN core
         */
        .brp_min = 4,
        .brp_max = 1024,
        .brp_inc = 1,
};

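/*
 * Worked example for the register packing done in bfin_can_set_bittiming()
 * below (values are illustrative, not taken from a specific board): with
 * sclk = 125 MHz and a 500 kbit/s target, the CAN core may choose brp = 25,
 * prop_seg + phase_seg1 = 7, phase_seg2 = 2, sjw = 1.  That gives a time
 * quantum of 25 / 125 MHz = 200 ns and 1 + 7 + 2 = 10 quanta per bit,
 * i.e. 2 us per bit.  The function then writes CLOCK = 24 (brp - 1) and
 * TIMING = ((1 - 1) << 8) | ((2 - 1) << 4) | (7 - 1) = 0x0016.
 */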
static int bfin_can_set_bittiming(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        struct can_bittiming *bt = &priv->can.bittiming;
        u16 clk, timing;

        clk = bt->brp - 1;
        timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
                ((bt->phase_seg2 - 1) << 4);

        /*
         * triple-sampling of the bus is requested via the SAM bit
         */
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                timing |= SAM;

        bfin_write(&reg->clock, clk);
        bfin_write(&reg->timing, timing);

        netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);

        return 0;
}

static void bfin_can_set_reset_mode(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        int timeout = BFIN_CAN_TIMEOUT;
        int i;

        /* disable interrupts */
        bfin_write(&reg->mbim1, 0);
        bfin_write(&reg->mbim2, 0);
        bfin_write(&reg->gim, 0);

        /* software reset and request configuration mode */
        bfin_write(&reg->control, SRS | CCR);
        SSYNC();
        bfin_write(&reg->control, CCR);
        SSYNC();
        while (!(bfin_read(&reg->control) & CCA)) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to enter configuration mode\n");
                        BUG();
                }
        }

        /*
         * mark all mailbox configurations as inactive by clearing the
         * mailbox configuration registers (0 = mailbox disabled,
         * 1 = mailbox enabled)
         */
        bfin_write(&reg->mc1, 0);
        bfin_write(&reg->mc2, 0);

        /* set mailbox direction */
        bfin_write(&reg->md1, 0xFFFF);  /* mailboxes 0-15: receive */
        bfin_write(&reg->md2, 0);       /* mailboxes 16-31: transmit */

        /* set up the standard-frame receive mailboxes */
        for (i = 0; i < 2; i++) {
                bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
                bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
                bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
                bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
                bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
        }

        /* set up the extended-frame receive mailboxes */
        for (i = 0; i < 2; i++) {
                bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
                bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
                bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
                bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
                bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
        }

        /* enable the transmit mailbox and the two receive mailboxes */
        bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
        bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
        SSYNC();

        priv->can.state = CAN_STATE_STOPPED;
}

static void bfin_can_set_normal_mode(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        int timeout = BFIN_CAN_TIMEOUT;

        /*
         * leave configuration mode
         */
        bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);

        while (bfin_read(&reg->status) & CCA) {
                udelay(10);
                if (--timeout == 0) {
                        netdev_err(dev, "fail to leave configuration mode\n");
                        BUG();
                }
        }

        /*
         * clear all pending tx and rx mailbox interrupt flags
         */
        bfin_write(&reg->mbtif1, 0xFFFF);
        bfin_write(&reg->mbtif2, 0xFFFF);
        bfin_write(&reg->mbrif1, 0xFFFF);
        bfin_write(&reg->mbrif2, 0xFFFF);

        /*
         * clear the global interrupt status register
         */
        bfin_write(&reg->gis, 0x7FF);

        /*
         * initialize interrupts:
         * - unmask the receive and transmit mailboxes
         * - unmask error passive, bus-off and receive-message-lost
         *   global interrupts
         */
        bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
        bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));

        bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
        SSYNC();
}

static void bfin_can_start(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);

        /* enter reset mode if the chip is not already stopped */
        if (priv->can.state != CAN_STATE_STOPPED)
                bfin_can_set_reset_mode(dev);

        /* leave reset mode and start normal operation */
        bfin_can_set_normal_mode(dev);
}

static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
{
        switch (mode) {
        case CAN_MODE_START:
                bfin_can_start(dev);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
                break;

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int bfin_can_get_berr_counter(const struct net_device *dev,
                                     struct can_berr_counter *bec)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;

        u16 cec = bfin_read(&reg->cec);

        bec->txerr = cec >> 8;  /* transmit error count in the high byte */
        bec->rxerr = cec;       /* receive error count in the low byte */

        return 0;
}

static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u8 dlc = cf->can_dlc;
        canid_t id = cf->can_id;
        u8 *data = cf->data;
        u16 val;
        int i;

        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;

        netif_stop_queue(dev);

        /* fill id */
        if (id & CAN_EFF_FLAG) {
                /* extended frame format (EFF) */
                bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
                val = ((id & 0x1FFF0000) >> 16) | IDE;
        } else {
                /* standard frame format (SFF) */
                val = (id << 2);
        }
        if (id & CAN_RTR_FLAG)
                val |= RTR;
        bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);

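        /*
         * Fill payload.  The eight CAN data bytes are packed into four
         * 16-bit mailbox data words: CAN byte 0 ends up in the high byte
         * of .data[6], byte 1 in its low byte, byte 2 in the high byte of
         * .data[4], and so on.  As a sketch with illustrative values, a
         * frame with dlc = 3 and data = {0x11, 0x22, 0x33} results in
         * .data[6] = 0x1122, .data[4] = 0x3300 and .data[2] = .data[0] = 0.
         */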
        for (i = 0; i < 8; i += 2) {
                val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
                        ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
                bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
        }

        /* fill data length code */
        bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);

        can_put_echo_skb(skb, dev, 0);

        /* set transmit request for the transmit mailbox */
        bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));

        return NETDEV_TX_OK;
}

static void bfin_can_rx(struct net_device *dev, u16 isrc)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct bfin_can_regs __iomem *reg = priv->membase;
        struct can_frame *cf;
        struct sk_buff *skb;
        int obj;
        int i;
        u16 val;

        skb = alloc_can_skb(dev, &cf);
        if (skb == NULL)
                return;

        /* get id */
        if (isrc & BIT(RECEIVE_EXT_CHL)) {
                /* extended frame format (EFF) */
                cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
                             & 0x1FFF) << 16)
                             + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
                cf->can_id |= CAN_EFF_FLAG;
                obj = RECEIVE_EXT_CHL;
        } else {
                /* standard frame format (SFF) */
                cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
                             & 0x1FFC) >> 2;
                obj = RECEIVE_STD_CHL;
        }
        if (bfin_read(&reg->chl[obj].id1) & RTR)
                cf->can_id |= CAN_RTR_FLAG;

        /* get data length code */
        cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);

        /* get payload, reversing the byte/word packing done on transmit */
        for (i = 0; i < 8; i += 2) {
                val = bfin_read(&reg->chl[obj].data[i]);
                cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
                cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
        }

        netif_rx(skb);

        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
}

static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        struct net_device_stats *stats = &dev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
        enum can_state state = priv->can.state;

        skb = alloc_can_err_skb(dev, &cf);
        if (skb == NULL)
                return -ENOMEM;

        if (isrc & RMLIS) {
                /* data overrun interrupt */
                netdev_dbg(dev, "data overrun interrupt\n");
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
                stats->rx_errors++;
        }

        if (isrc & BOIS) {
                netdev_dbg(dev, "bus-off mode interrupt\n");
                state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
                can_bus_off(dev);
        }

        if (isrc & EPIS) {
                /* error passive interrupt */
                netdev_dbg(dev, "error passive interrupt\n");
                state = CAN_STATE_ERROR_PASSIVE;
        }

        if ((isrc & EWTIS) || (isrc & EWRIS)) {
                netdev_dbg(dev, "error warning transmit/receive interrupt\n");
                state = CAN_STATE_ERROR_WARNING;
        }

        if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
                                state == CAN_STATE_ERROR_PASSIVE)) {
                u16 cec = bfin_read(&reg->cec);
                u8 rxerr = cec;
                u8 txerr = cec >> 8;

                cf->can_id |= CAN_ERR_CRTL;
                if (state == CAN_STATE_ERROR_WARNING) {
                        priv->can.can_stats.error_warning++;
                        cf->data[1] = (txerr > rxerr) ?
                                CAN_ERR_CRTL_TX_WARNING :
                                CAN_ERR_CRTL_RX_WARNING;
                } else {
                        priv->can.can_stats.error_passive++;
                        cf->data[1] = (txerr > rxerr) ?
                                CAN_ERR_CRTL_TX_PASSIVE :
                                CAN_ERR_CRTL_RX_PASSIVE;
                }
        }

        if (status) {
                priv->can.can_stats.bus_error++;

                cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

                if (status & BEF)
                        cf->data[2] |= CAN_ERR_PROT_BIT;
                else if (status & FER)
                        cf->data[2] |= CAN_ERR_PROT_FORM;
                else if (status & SER)
                        cf->data[2] |= CAN_ERR_PROT_STUFF;
                else
                        cf->data[2] |= CAN_ERR_PROT_UNSPEC;
        }

        priv->can.state = state;

        netif_rx(skb);

        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;

        return 0;
}

static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        struct net_device_stats *stats = &dev->stats;
        u16 status, isrc;

        if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
                /* transmission complete interrupt */
                bfin_write(&reg->mbtif2, 0xFFFF);
                stats->tx_packets++;
                stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
                can_get_echo_skb(dev, 0);
                netif_wake_queue(dev);
        } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
                /* receive interrupt */
                isrc = bfin_read(&reg->mbrif1);
                bfin_write(&reg->mbrif1, 0xFFFF);
                bfin_can_rx(dev, isrc);
        } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
                /* error interrupt */
                isrc = bfin_read(&reg->gis);
                status = bfin_read(&reg->esr);
                bfin_write(&reg->gis, 0x7FF);
                bfin_can_err(dev, isrc, status);
        } else {
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}

static int bfin_can_open(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);
        int err;

        /* set chip into reset mode */
        bfin_can_set_reset_mode(dev);

        /* common open */
        err = open_candev(dev);
        if (err)
                goto exit_open;

        /* register interrupt handlers */
        err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
                          "bfin-can-rx", dev);
        if (err)
                goto exit_rx_irq;
        err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
                          "bfin-can-tx", dev);
        if (err)
                goto exit_tx_irq;
        err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
                          "bfin-can-err", dev);
        if (err)
                goto exit_err_irq;

        bfin_can_start(dev);

        netif_start_queue(dev);

        return 0;

exit_err_irq:
        free_irq(priv->tx_irq, dev);
exit_tx_irq:
        free_irq(priv->rx_irq, dev);
exit_rx_irq:
        close_candev(dev);
exit_open:
        return err;
}

static int bfin_can_close(struct net_device *dev)
{
        struct bfin_can_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);
        bfin_can_set_reset_mode(dev);

        close_candev(dev);

        free_irq(priv->rx_irq, dev);
        free_irq(priv->tx_irq, dev);
        free_irq(priv->err_irq, dev);

        return 0;
}

static struct net_device *alloc_bfin_candev(void)
{
        struct net_device *dev;
        struct bfin_can_priv *priv;

        dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
        if (!dev)
                return NULL;

        priv = netdev_priv(dev);

        priv->dev = dev;
        priv->can.bittiming_const = &bfin_can_bittiming_const;
        priv->can.do_set_bittiming = bfin_can_set_bittiming;
        priv->can.do_set_mode = bfin_can_set_mode;
        priv->can.do_get_berr_counter = bfin_can_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

        return dev;
}

static const struct net_device_ops bfin_can_netdev_ops = {
        .ndo_open       = bfin_can_open,
        .ndo_stop       = bfin_can_close,
        .ndo_start_xmit = bfin_can_start_xmit,
};

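/*
 * The probe routine below expects board code to register a "bfin_can"
 * platform device with one memory resource, three IRQ resources in the
 * order rx/tx/err, and platform_data pointing to the peripheral pin list
 * handed to peripheral_request_list() (conventionally a zero-terminated
 * array of unsigned short).  A minimal sketch of such a registration
 * follows; the pin macros, addresses and IRQ numbers are board-specific
 * assumptions, not values taken from this driver:
 *
 *	static unsigned short bfin_can_pins[] = { P_CAN0_RX, P_CAN0_TX, 0 };
 *
 *	static struct resource bfin_can_resources[] = {
 *		{ .start = 0xFFC02A00, .end = 0xFFC02AFF, .flags = IORESOURCE_MEM },
 *		{ .start = IRQ_CAN_RX,  .end = IRQ_CAN_RX,  .flags = IORESOURCE_IRQ },
 *		{ .start = IRQ_CAN_TX,  .end = IRQ_CAN_TX,  .flags = IORESOURCE_IRQ },
 *		{ .start = IRQ_CAN_ERR, .end = IRQ_CAN_ERR, .flags = IORESOURCE_IRQ },
 *	};
 *
 *	static struct platform_device bfin_can_device = {
 *		.name          = "bfin_can",
 *		.num_resources = ARRAY_SIZE(bfin_can_resources),
 *		.resource      = bfin_can_resources,
 *		.dev           = { .platform_data = bfin_can_pins },
 *	};
 *
 *	platform_device_register(&bfin_can_device);
 */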
static int bfin_can_probe(struct platform_device *pdev)
{
        int err;
        struct net_device *dev;
        struct bfin_can_priv *priv;
        struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
        unsigned short *pdata;

        pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -EINVAL;
                goto exit;
        }

        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
        if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
                err = -EINVAL;
                goto exit;
        }

        if (!request_mem_region(res_mem->start, resource_size(res_mem),
                                dev_name(&pdev->dev))) {
                err = -EBUSY;
                goto exit;
        }

        /* request peripheral pins */
        err = peripheral_request_list(pdata, dev_name(&pdev->dev));
        if (err)
                goto exit_mem_release;

        dev = alloc_bfin_candev();
        if (!dev) {
                err = -ENOMEM;
                goto exit_peri_pin_free;
        }

        priv = netdev_priv(dev);
        /* registers are directly addressable on Blackfin; no ioremap() */
        priv->membase = (void __iomem *)res_mem->start;
        priv->rx_irq = rx_irq->start;
        priv->tx_irq = tx_irq->start;
        priv->err_irq = err_irq->start;
        priv->pin_list = pdata;
        priv->can.clock.freq = get_sclk();

        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &bfin_can_netdev_ops;

        bfin_can_set_reset_mode(dev);

        err = register_candev(dev);
        if (err) {
                dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
                goto exit_candev_free;
        }

        dev_info(&pdev->dev,
                 "%s device registered "
                 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
                 DRV_NAME, priv->membase, priv->rx_irq,
                 priv->tx_irq, priv->err_irq, priv->can.clock.freq);
        return 0;

exit_candev_free:
        free_candev(dev);
exit_peri_pin_free:
        peripheral_free_list(pdata);
exit_mem_release:
        release_mem_region(res_mem->start, resource_size(res_mem));
exit:
        return err;
}

static int bfin_can_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct resource *res;

        bfin_can_set_reset_mode(dev);

        unregister_candev(dev);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        peripheral_free_list(priv->pin_list);

        free_candev(dev);
        return 0;
}

#ifdef CONFIG_PM
static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;
        int timeout = BFIN_CAN_TIMEOUT;

        if (netif_running(dev)) {
                /* enter sleep mode */
                bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
                SSYNC();
                while (!(bfin_read(&reg->intr) & SMACK)) {
                        udelay(10);
                        if (--timeout == 0) {
                                netdev_err(dev, "fail to enter sleep mode\n");
                                BUG();
                        }
                }
        }

        return 0;
}

static int bfin_can_resume(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct bfin_can_priv *priv = netdev_priv(dev);
        struct bfin_can_regs __iomem *reg = priv->membase;

        if (netif_running(dev)) {
                /* leave sleep mode */
                bfin_write(&reg->intr, 0);
                SSYNC();
        }

        return 0;
}
#else
#define bfin_can_suspend NULL
#define bfin_can_resume NULL
#endif  /* CONFIG_PM */

static struct platform_driver bfin_can_driver = {
        .probe = bfin_can_probe,
        .remove = bfin_can_remove,
        .suspend = bfin_can_suspend,
        .resume = bfin_can_resume,
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
        },
};

module_platform_driver(bfin_can_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
MODULE_ALIAS("platform:" DRV_NAME);