/* CAN bus driver for Bosch M_CAN controller
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *	Dong Aisheng <b29396@freescale.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>

#define M_CAN_NAPI_WEIGHT 64

#define MRAM_CFG_LEN 8

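/* register map of the Bosch M_CAN IP (byte offsets from the peripheral base) */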
enum m_can_reg {
	M_CAN_CREL = 0x0,
	M_CAN_ENDN = 0x4,
	M_CAN_CUST = 0x8,
	M_CAN_DBTP = 0xc,
	M_CAN_TEST = 0x10,
	M_CAN_RWD = 0x14,
	M_CAN_CCCR = 0x18,
	M_CAN_NBTP = 0x1c,
	M_CAN_TSCC = 0x20,
	M_CAN_TSCV = 0x24,
	M_CAN_TOCC = 0x28,
	M_CAN_TOCV = 0x2c,
	M_CAN_ECR = 0x40,
	M_CAN_PSR = 0x44,

	M_CAN_TDCR = 0x48,
	M_CAN_IR = 0x50,
	M_CAN_IE = 0x54,
	M_CAN_ILS = 0x58,
	M_CAN_ILE = 0x5c,
	M_CAN_GFC = 0x80,
	M_CAN_SIDFC = 0x84,
	M_CAN_XIDFC = 0x88,
	M_CAN_XIDAM = 0x90,
	M_CAN_HPMS = 0x94,
	M_CAN_NDAT1 = 0x98,
	M_CAN_NDAT2 = 0x9c,
	M_CAN_RXF0C = 0xa0,
	M_CAN_RXF0S = 0xa4,
	M_CAN_RXF0A = 0xa8,
	M_CAN_RXBC = 0xac,
	M_CAN_RXF1C = 0xb0,
	M_CAN_RXF1S = 0xb4,
	M_CAN_RXF1A = 0xb8,
	M_CAN_RXESC = 0xbc,
	M_CAN_TXBC = 0xc0,
	M_CAN_TXFQS = 0xc4,
	M_CAN_TXESC = 0xc8,
	M_CAN_TXBRP = 0xcc,
	M_CAN_TXBAR = 0xd0,
	M_CAN_TXBCR = 0xd4,
	M_CAN_TXBTO = 0xd8,
	M_CAN_TXBCF = 0xdc,
	M_CAN_TXBTIE = 0xe0,
	M_CAN_TXBCIE = 0xe4,
	M_CAN_TXEFC = 0xf0,
	M_CAN_TXEFS = 0xf4,
	M_CAN_TXEFA = 0xf8,
};

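/* Last Error Code as reported in PSR.LEC (see PSR_LEC_MASK below) */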
enum m_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
};

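/* sections of the Message RAM used by this driver, in layout order */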
enum m_can_mram_cfg {
	MRAM_SIDF = 0,
	MRAM_XIDF,
	MRAM_RXF0,
	MRAM_RXF1,
	MRAM_RXB,
	MRAM_TXE,
	MRAM_TXB,
	MRAM_CFG_NUM,
};

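/* Core Release register (CREL): used to detect the IP version (3.0.x/3.1.x/3.2.x) */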
#define CREL_REL_SHIFT 28
#define CREL_REL_MASK (0xF << CREL_REL_SHIFT)
#define CREL_STEP_SHIFT 24
#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT)
#define CREL_SUBSTEP_SHIFT 20
#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT)

#define DBTP_TDC BIT(23)
#define DBTP_DBRP_SHIFT 16
#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT)
#define DBTP_DTSEG1_SHIFT 8
#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT)
#define DBTP_DTSEG2_SHIFT 4
#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT)
#define DBTP_DSJW_SHIFT 0
#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)

#define TDCR_TDCO_SHIFT 8
#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT)
#define TDCR_TDCF_SHIFT 0
#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT)

#define TEST_LBCK BIT(4)

#define CCCR_CMR_MASK 0x3
#define CCCR_CMR_SHIFT 10
#define CCCR_CMR_CANFD 0x1
#define CCCR_CMR_CANFD_BRS 0x2
#define CCCR_CMR_CAN 0x3
#define CCCR_CME_MASK 0x3
#define CCCR_CME_SHIFT 8
#define CCCR_CME_CAN 0
#define CCCR_CME_CANFD 0x1
#define CCCR_CME_CANFD_BRS 0x2
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
#define CCCR_MON BIT(5)
#define CCCR_CSR BIT(4)
#define CCCR_CSA BIT(3)
#define CCCR_ASM BIT(2)
#define CCCR_CCE BIT(1)
#define CCCR_INIT BIT(0)
#define CCCR_CANFD 0x10

#define CCCR_EFBI BIT(13)
#define CCCR_PXHD BIT(12)
#define CCCR_BRSE BIT(9)
#define CCCR_FDOE BIT(8)

#define CCCR_NISO BIT(15)

#define NBTP_NSJW_SHIFT 25
#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT)
#define NBTP_NBRP_SHIFT 16
#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT)
#define NBTP_NTSEG1_SHIFT 8
#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT)
#define NBTP_NTSEG2_SHIFT 0
#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT)

#define ECR_RP BIT(15)
#define ECR_REC_SHIFT 8
#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT)
#define ECR_TEC_SHIFT 0
#define ECR_TEC_MASK 0xff

#define PSR_BO BIT(7)
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
#define PSR_LEC_MASK 0x7

#define IR_ALL_INT 0xffffffff

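/* Interrupt Register (IR) bits. The protocol error bits in the upper word
 * differ between IP versions: 3.0.x reports individual STE/FOE/ACKE/BE/CRCE
 * flags, while 3.1.x and later use PED/PEA (see IR_ERR_LEC_30X/31X below).
 */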
#define IR_ARA BIT(29)
#define IR_PED BIT(28)
#define IR_PEA BIT(27)

#define IR_STE BIT(31)
#define IR_FOE BIT(30)
#define IR_ACKE BIT(29)
#define IR_BE BIT(28)
#define IR_CRCE BIT(27)
#define IR_WDI BIT(26)
#define IR_BO BIT(25)
#define IR_EW BIT(24)
#define IR_EP BIT(23)
#define IR_ELO BIT(22)
#define IR_BEU BIT(21)
#define IR_BEC BIT(20)
#define IR_DRX BIT(19)
#define IR_TOO BIT(18)
#define IR_MRAF BIT(17)
#define IR_TSW BIT(16)
#define IR_TEFL BIT(15)
#define IR_TEFF BIT(14)
#define IR_TEFW BIT(13)
#define IR_TEFN BIT(12)
#define IR_TFE BIT(11)
#define IR_TCF BIT(10)
#define IR_TC BIT(9)
#define IR_HPM BIT(8)
#define IR_RF1L BIT(7)
#define IR_RF1F BIT(6)
#define IR_RF1W BIT(5)
#define IR_RF1N BIT(4)
#define IR_RF0L BIT(3)
#define IR_RF0F BIT(2)
#define IR_RF0W BIT(1)
#define IR_RF0N BIT(0)
#define IR_ERR_STATE (IR_BO | IR_EW | IR_EP)

#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
			IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)

#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
			IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)

#define ILS_ALL_INT0 0x0
#define ILS_ALL_INT1 0xFFFFFFFF

#define ILE_EINT1 BIT(1)
#define ILE_EINT0 BIT(0)

#define RXFC_FWM_SHIFT 24
#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
#define RXFC_FS_SHIFT 16
#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)

#define RXFS_RFL BIT(25)
#define RXFS_FF BIT(24)
#define RXFS_FPI_SHIFT 16
#define RXFS_FPI_MASK 0x3f0000
#define RXFS_FGI_SHIFT 8
#define RXFS_FGI_MASK 0x3f00
#define RXFS_FFL_MASK 0x7f

#define M_CAN_RXESC_8BYTES 0x0
#define M_CAN_RXESC_64BYTES 0x777

#define TXBC_NDTB_SHIFT 16
#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT)
#define TXBC_TFQS_SHIFT 24
#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT)

#define TXFQS_TFQF BIT(21)
#define TXFQS_TFQPI_SHIFT 16
#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT)
#define TXFQS_TFGI_SHIFT 8
#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT)
#define TXFQS_TFFL_SHIFT 0
#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT)

#define TXESC_TBDS_8BYTES 0x0
#define TXESC_TBDS_64BYTES 0x7

#define TXEFC_EFS_SHIFT 16
#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT)

#define TXEFS_TEFL BIT(25)
#define TXEFS_EFF BIT(24)
#define TXEFS_EFGI_SHIFT 8
#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT)
#define TXEFS_EFFL_SHIFT 0
#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT)

#define TXEFA_EFAI_SHIFT 0
#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT)

#define SIDF_ELEMENT_SIZE 4
#define XIDF_ELEMENT_SIZE 8
#define RXF0_ELEMENT_SIZE 72
#define RXF1_ELEMENT_SIZE 72
#define RXB_ELEMENT_SIZE 72
#define TXE_ELEMENT_SIZE 8
#define TXB_ELEMENT_SIZE 72

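/* byte offsets of the ID, DLC/flags and data fields within one Rx/Tx Buffer
 * element in Message RAM
 */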
#define M_CAN_FIFO_ID 0x0
#define M_CAN_FIFO_DLC 0x4
#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2))

#define RX_BUF_ESI BIT(31)
#define RX_BUF_XTD BIT(30)
#define RX_BUF_RTR BIT(29)

#define RX_BUF_ANMF BIT(31)
#define RX_BUF_FDF BIT(21)
#define RX_BUF_BRS BIT(20)

#define TX_BUF_ESI BIT(31)
#define TX_BUF_XTD BIT(30)
#define TX_BUF_RTR BIT(29)

#define TX_BUF_EFC BIT(23)
#define TX_BUF_FDF BIT(21)
#define TX_BUF_BRS BIT(20)
#define TX_BUF_MM_SHIFT 24
#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT)

#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)

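/* offset (in bytes from the Message RAM base) and element count of one
 * Message RAM section, as derived from the "bosch,mram-cfg" property
 */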
struct mram_cfg {
	u16 off;
	u8 num;
};

struct m_can_priv {
	struct can_priv can;
	struct napi_struct napi;
	struct net_device *dev;
	struct device *device;
	struct clk *hclk;
	struct clk *cclk;
	void __iomem *base;
	u32 irqstatus;
	int version;

	void __iomem *mram_base;
	struct mram_cfg mcfg[MRAM_CFG_NUM];
};

static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
{
	return readl(priv->base + reg);
}

static inline void m_can_write(const struct m_can_priv *priv,
			       enum m_can_reg reg, u32 val)
{
	writel(val, priv->base + reg);
}

static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
				  u32 fgi, unsigned int offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
		     fgi * RXF0_ELEMENT_SIZE + offset);
}

static inline void m_can_fifo_write(const struct m_can_priv *priv,
				    u32 fpi, unsigned int offset, u32 val)
{
	writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
	       fpi * TXB_ELEMENT_SIZE + offset);
}

static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
				      u32 fgi, u32 offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
		     fgi * TXE_ELEMENT_SIZE + offset);
}

static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
{
	return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
}

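/* Enable or disable access to the configuration registers: set INIT, then set
 * CCE while INIT is held (or clear both), and poll CCCR until the hardware
 * reflects the requested state, bounded by a small timeout.
 */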
static inline void m_can_config_endisable(const struct m_can_priv *priv,
					  bool enable)
{
	u32 cccr = m_can_read(priv, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	if (enable) {
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(priv->dev, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, 0x0);
}

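/* Read one element from Rx FIFO 0, convert it into a (CAN FD) skb, pass it up
 * the stack and acknowledge the FIFO get index.
 */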
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id, fgi, dlc;
	int i;

	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
	dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
	if (dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dlc & RX_BUF_FDF)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
	else
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
	if (id & RX_BUF_XTD)
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(priv, fgi,
						M_CAN_FIFO_DATA(i / 4));
	}

	m_can_write(priv, M_CAN_RXF0A, fgi);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}

static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;

	rxfs = m_can_read(priv, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		if (rxfs & RXFS_RFL)
			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");

		m_can_read_fifo(dev, rxfs);

		quota--;
		pkts++;
		rxfs = m_can_read(priv, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);

	return 1;
}

static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(priv, M_CAN_ECR);
	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
	bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;

	return 0;
}

static int m_can_clk_start(struct m_can_priv *priv)
{
	int err;

	err = pm_runtime_get_sync(priv->device);
	if (err < 0) {
		pm_runtime_put_noidle(priv->device);
		return err;
	}

	return 0;
}

static void m_can_clk_stop(struct m_can_priv *priv)
{
	pm_runtime_put_sync(priv->device);
}

static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(priv);

	return 0;
}

static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		priv->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(priv);
		priv->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(priv, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if ((psr & PSR_EW) &&
	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if ((psr & PSR_EP) &&
	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if ((psr & PSR_BO) &&
	    (priv->can.state != CAN_STATE_BUS_OFF)) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}

static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_ELO)
		netdev_err(dev, "Error Logging Overflow\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}

static inline bool is_lec_err(u32 psr)
{
	psr &= LEC_UNUSED;

	return psr && (psr != LEC_UNUSED);
}

static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}

static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
	if (!irqstatus)
		goto end;

	psr = m_can_read(priv, M_CAN_PSR);
	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N)
		work_done += m_can_do_rx_poll(dev, (quota - work_done));

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(priv);
	}

end:
	return work_done;
}

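/* Drain the TX Event FIFO (versions >= 3.1.x): for each event element, use the
 * stored message marker to echo the matching skb and update TX statistics.
 */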
static void m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	m_can_txefs = m_can_read(priv, M_CAN_TXEFS);

	txe_count = (m_can_txefs & TXEFS_EFFL_MASK) >> TXEFS_EFFL_SHIFT;

	for (i = 0; i < txe_count; i++) {
		fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
			>> TXEFS_EFGI_SHIFT;

		msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
			    TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;

		m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
						(fgi << TXEFA_EFAI_SHIFT)));

		stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
		stats->tx_packets++;
	}
}

static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 ir;

	ir = m_can_read(priv, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	if (ir & IR_ALL_INT)
		m_can_write(priv, M_CAN_IR, ir);

	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		priv->irqstatus = ir;
		m_can_disable_all_interrupts(priv);
		napi_schedule(&priv->napi);
	}

	if (priv->version == 30) {
		if (ir & IR_TC) {
			stats->tx_bytes += can_get_echo_skb(dev, 0);
			stats->tx_packets++;
			can_led_event(dev, CAN_LED_EVENT_TX);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			m_can_echo_tx_event(dev);
			can_led_event(dev, CAN_LED_EVENT_TX);
			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(priv))
				netif_wake_queue(dev);
		}
	}

	return IRQ_HANDLED;
}

static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

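/* Program the nominal (NBTP) and, in CAN FD mode, the data (DBTP) bit timing;
 * above 2.5 Mbit/s the transmitter delay compensation offset (TDCR) is derived
 * from the configured secondary sample point.
 */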
static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
		  (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
	m_can_write(priv, M_CAN_NBTP, reg_btp);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			ssp = dbt->sample_point;

			tdco = (priv->can.clock.freq / 1000) *
			       ssp / dbt->bitrate;

			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(priv, M_CAN_TDCR,
				    tdco << TDCR_TDCO_SHIFT);
		}

		reg_btp |= (brp << DBTP_DBRP_SHIFT) |
			   (sjw << DBTP_DSJW_SHIFT) |
			   (tseg1 << DBTP_DTSEG1_SHIFT) |
			   (tseg2 << DBTP_DTSEG2_SHIFT);

		m_can_write(priv, M_CAN_DBTP, reg_btp);
	}

	return 0;
}

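/* Configure the M_CAN core: Rx/Tx element sizes, TX buffer/FIFO and TX event
 * FIFO layout in Message RAM, Rx FIFO 0/1 layout, operating mode bits in
 * CCCR/TEST (loopback, monitor, CAN FD), interrupt enables and bit timing.
 * The layout writes differ between version 3.0.x (single dedicated TX buffer)
 * and 3.1.x+ (TX FIFO).
 */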
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(priv, true);

	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);

	m_can_write(priv, M_CAN_GFC, 0x0);

	if (priv->version == 30) {
		m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
			    priv->mcfg[MRAM_TXB].off);
	} else {
		m_can_write(priv, M_CAN_TXBC,
			    (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
			    (priv->mcfg[MRAM_TXB].off));
	}

	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);

	if (priv->version == 30) {
		m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
			    priv->mcfg[MRAM_TXE].off);
	} else {
		m_can_write(priv, M_CAN_TXEFC,
			    ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
			     & TXEFC_EFS_MASK) |
			    priv->mcfg[MRAM_TXE].off);
	}

	m_can_write(priv, M_CAN_RXF0C,
		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF0].off);

	m_can_write(priv, M_CAN_RXF1C,
		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(priv, M_CAN_CCCR);
	test = m_can_read(priv, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (priv->version == 30) {
		cccr &= ~(CCCR_TEST | CCCR_MON |
			  (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
			  (CCCR_CME_MASK << CCCR_CME_SHIFT));

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;

	} else {
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO);

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	m_can_write(priv, M_CAN_CCCR, cccr);
	m_can_write(priv, M_CAN_TEST, test);

	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
		if (priv->version == 30)
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	} else {
		m_can_write(priv, M_CAN_IE, IR_ALL_INT);
	}

	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);

	m_can_set_bittiming(dev);

	m_can_config_endisable(priv, false);
}

static void m_can_start(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	m_can_chip_config(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(priv);
}

static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

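/* Read the Core Release register and derive the IP version: a major release of
 * 3 maps to 30 + step (i.e. 30, 31, 32, ...); anything else is unsupported and
 * returns 0.
 */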
static int m_can_check_core_release(void __iomem *m_can_base)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;
	struct m_can_priv temp_priv = {
		.base = m_can_base
	};

	crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
	rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
	step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);

	if (rel == 3)
		res = 30 + step;
	else
		res = 0;

	return res;
}

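/* Probe for ISO/non-ISO CAN FD selection support (used for version 3.2.x
 * cores) by trying to set CCCR.NISO and polling whether the bit sticks.
 */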
static bool m_can_niso_supported(const struct m_can_priv *priv)
{
	u32 cccr_reg, cccr_poll;
	int niso_timeout;

	m_can_config_endisable(priv, true);
	cccr_reg = m_can_read(priv, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
					  (cccr_poll == cccr_reg), 0, 10);

	cccr_reg &= ~(CCCR_NISO);
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(priv, false);

	return !niso_timeout;
}

static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
			   void __iomem *addr)
{
	struct m_can_priv *priv;
	int m_can_version;

	m_can_version = m_can_check_core_release(addr);

	if (!m_can_version) {
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);

	priv->version = m_can_version;
	priv->dev = dev;
	priv->base = addr;
	priv->can.do_set_mode = m_can_set_mode;
	priv->can.do_get_berr_counter = m_can_get_berr_counter;

	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_BERR_REPORTING |
				       CAN_CTRLMODE_FD;

	switch (priv->version) {
	case 30:
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_30X;
		priv->can.data_bittiming_const =
			&m_can_data_bittiming_const_30X;
		break;
	case 31:
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
			&m_can_data_bittiming_const_31X;
		break;
	case 32:
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
			&m_can_data_bittiming_const_31X;
		priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
						 ? CAN_CTRLMODE_FD_NON_ISO
						 : 0);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			priv->version);
		return -EINVAL;
	}

	return 0;
}

static int m_can_open(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
			  dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(priv);
	return err;
}

static void m_can_stop(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	m_can_disable_all_interrupts(priv);

	priv->can.state = CAN_STATE_STOPPED;
}

static int m_can_close(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	m_can_stop(dev);
	m_can_clk_stop(priv);
	free_irq(dev->irq, dev);
	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_priv *priv = netdev_priv(dev);
	unsigned int wrap = priv->can.echo_skb_max;
	int next_idx;

	next_idx = (++putidx >= wrap ? 0 : putidx);

	return !!priv->can.echo_skb[next_idx];
}

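/* Transmit path. Version 3.0.x uses a single dedicated TX buffer and stops the
 * queue for every frame until the TC interrupt; 3.1.x and later queue into the
 * TX FIFO at the put index, store the put index as message marker for the TX
 * Event FIFO, and only stop the queue when the FIFO (or the next echo slot) is
 * occupied.
 */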
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 id, cccr, fdflags;
	int i;
	int putidx;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cf->can_id & CAN_EFF_FLAG) {
		id = cf->can_id & CAN_EFF_MASK;
		id |= TX_BUF_XTD;
	} else {
		id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		id |= TX_BUF_RTR;

	if (priv->version == 30) {
		netif_stop_queue(dev);

		m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
		m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
				 can_len2dlc(cf->len) << 16);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, 0,
					 M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		can_put_echo_skb(skb, dev, 0);

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(priv, M_CAN_CCCR);
			cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= CCCR_CMR_CANFD_BRS <<
						CCCR_CMR_SHIFT;
				else
					cccr |= CCCR_CMR_CANFD <<
						CCCR_CMR_SHIFT;
			} else {
				cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
			}
			m_can_write(priv, M_CAN_CCCR, cccr);
		}
		m_can_write(priv, M_CAN_TXBTIE, 0x1);
		m_can_write(priv, M_CAN_TXBAR, 0x1);
	} else {
		if (m_can_tx_fifo_full(priv)) {
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");
			return NETDEV_TX_BUSY;
		}

		putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
			  >> TXFQS_TFQPI_SHIFT);

		m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);

		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
				 ((putidx << TX_BUF_MM_SHIFT) &
				  TX_BUF_MM_MASK) |
				 (can_len2dlc(cf->len) << 16) |
				 fdflags | TX_BUF_EFC);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		can_put_echo_skb(skb, dev, putidx);

		m_can_write(priv, M_CAN_TXBAR, (1 << putidx));

		if (m_can_tx_fifo_full(priv) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}

static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}

static void m_can_init_ram(struct m_can_priv *priv)
{
	int end, i, start;

	start = priv->mcfg[MRAM_SIDF].off;
	end = priv->mcfg[MRAM_TXB].off +
		priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
	for (i = start; i < end; i += 4)
		writel(0x0, priv->mram_base + i);
}

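/* Lay out the Message RAM sections back to back from the values in the
 * "bosch,mram-cfg" device tree property (8 cells: the SIDF offset followed by
 * the element counts for SIDF, XIDF, RXF0, RXF1, RXB, TXE and TXB). A
 * hypothetical example property, for illustration only:
 *
 *   bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 */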
static void m_can_of_parse_mram(struct m_can_priv *priv,
				const u32 *mram_config_vals)
{
	priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
		priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
		priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
		priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
		priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
		priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
		priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		(TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);

	dev_dbg(priv->device,
		"mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		priv->mram_base,
		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);

	m_can_init_ram(priv);
}

static int m_can_plat_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct m_can_priv *priv;
	struct resource *res;
	void __iomem *addr;
	void __iomem *mram_addr;
	struct clk *hclk, *cclk;
	int irq, ret;
	struct device_node *np;
	u32 mram_config_vals[MRAM_CFG_LEN];
	u32 tx_fifo_size;

	np = pdev->dev.of_node;

	hclk = devm_clk_get(&pdev->dev, "hclk");
	cclk = devm_clk_get(&pdev->dev, "cclk");

	if (IS_ERR(hclk) || IS_ERR(cclk)) {
		dev_err(&pdev->dev, "no clock found\n");
		ret = -ENODEV;
		goto failed_ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
	addr = devm_ioremap_resource(&pdev->dev, res);
	irq = platform_get_irq_byname(pdev, "int0");

	if (IS_ERR(addr) || irq < 0) {
		ret = -EINVAL;
		goto failed_ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
	if (!res) {
		ret = -ENODEV;
		goto failed_ret;
	}

	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mram_addr) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
					 mram_config_vals,
					 sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(&pdev->dev, "Could not get Message RAM configuration.");
		goto failed_ret;
	}

	tx_fifo_size = mram_config_vals[7];

	dev = alloc_candev(sizeof(*priv), tx_fifo_size);
	if (!dev) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	priv = netdev_priv(dev);
	dev->irq = irq;
	priv->device = &pdev->dev;
	priv->hclk = hclk;
	priv->cclk = cclk;
	priv->can.clock.freq = clk_get_rate(cclk);
	priv->mram_base = mram_addr;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	pm_runtime_enable(&pdev->dev);
	ret = m_can_clk_start(priv);
	if (ret)
		goto pm_runtime_fail;

	ret = m_can_dev_setup(pdev, dev, addr);
	if (ret)
		goto clk_disable;

	ret = register_m_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto clk_disable;
	}

	m_can_of_parse_mram(priv, mram_config_vals);

	devm_can_led_init(dev);

	of_can_transceiver(dev);

	dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, dev->irq, priv->version);

clk_disable:
	m_can_clk_stop(priv);
pm_runtime_fail:
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		free_candev(dev);
	}
failed_ret:
	return ret;
}

static __maybe_unused int m_can_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(priv);
	}

	pinctrl_pm_select_sleep_state(dev);

	priv->can.state = CAN_STATE_SLEEPING;

	return 0;
}

static __maybe_unused int m_can_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	pinctrl_pm_select_default_state(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(priv);
		if (ret)
			return ret;

		m_can_init_ram(priv);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}

static int m_can_plat_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);

	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	free_candev(dev);

	return 0;
}

static int __maybe_unused m_can_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->cclk);
	clk_disable_unprepare(priv->hclk);

	return 0;
}

static int __maybe_unused m_can_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);
	int err;

	err = clk_prepare_enable(priv->hclk);
	if (err)
		return err;

	err = clk_prepare_enable(priv->cclk);
	if (err)
		clk_disable_unprepare(priv->hclk);

	return err;
}

static const struct dev_pm_ops m_can_pmops = {
	SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
			   m_can_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};

static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);

static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,
};

module_platform_driver(m_can_plat_driver);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");