1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/bitfield.h>
16#include <linux/clk.h>
17#include <linux/device.h>
18#include <linux/mod_devicetable.h>
19#include <linux/module.h>
20#include <linux/pm_runtime.h>
21#include <linux/property.h>
22
23#include <asm/unaligned.h>
24
25#include "mcp251xfd.h"
26
27#define DEVICE_NAME "mcp251xfd"
28
/* MCP2517FD: first generation of the family. Needs all CRC and ECC
 * related quirks; MCP251XFD_QUIRK_MAB_NO_WARN presumably suppresses
 * warnings related to the chip's Message Assembly Buffer -- confirm
 * against the quirk's use in the IRQ path.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
35
/* MCP2518FD: same quirk set as the MCP2517FD, but without the MAB
 * warning workaround.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_REG_CON_MODE_MIXED == 0 ? /* unreachable */ 0 :
		MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
41
42
/* Generic MCP251xFD entry -- catch-all when the concrete model is not
 * (yet) identified; TODO confirm how the probe path selects this entry.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
48
/* Nominal (arbitration phase) bit timing limits of the controller. */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
60
/* Data phase bit timing limits (CAN FD). */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
72
73static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
74{
75 switch (model) {
76 case MCP251XFD_MODEL_MCP2517FD:
77 return "MCP2517FD";
78 case MCP251XFD_MODEL_MCP2518FD:
79 return "MCP2518FD";
80 case MCP251XFD_MODEL_MCP251XFD:
81 return "MCP251xFD";
82 }
83
84 return "<unknown>";
85}
86
87static inline const char *
88mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
89{
90 return __mcp251xfd_get_model_str(priv->devtype_data.model);
91}
92
93static const char *mcp251xfd_get_mode_str(const u8 mode)
94{
95 switch (mode) {
96 case MCP251XFD_REG_CON_MODE_MIXED:
97 return "Mixed (CAN FD/CAN 2.0)";
98 case MCP251XFD_REG_CON_MODE_SLEEP:
99 return "Sleep";
100 case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
101 return "Internal Loopback";
102 case MCP251XFD_REG_CON_MODE_LISTENONLY:
103 return "Listen Only";
104 case MCP251XFD_REG_CON_MODE_CONFIG:
105 return "Configuration";
106 case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
107 return "External Loopback";
108 case MCP251XFD_REG_CON_MODE_CAN2_0:
109 return "CAN 2.0";
110 case MCP251XFD_REG_CON_MODE_RESTRICTED:
111 return "Restricted Operation";
112 }
113
114 return "<unknown>";
115}
116
117static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
118{
119 if (!priv->reg_vdd)
120 return 0;
121
122 return regulator_enable(priv->reg_vdd);
123}
124
125static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
126{
127 if (!priv->reg_vdd)
128 return 0;
129
130 return regulator_disable(priv->reg_vdd);
131}
132
133static inline int
134mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
135{
136 if (!priv->reg_xceiver)
137 return 0;
138
139 return regulator_enable(priv->reg_xceiver);
140}
141
142static inline int
143mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
144{
145 if (!priv->reg_xceiver)
146 return 0;
147
148 return regulator_disable(priv->reg_xceiver);
149}
150
/* Enable the external clock and the Vdd supply of the chip.
 *
 * Note: the oscillator stabilization delay below is executed even when
 * enabling Vdd fails, before the error is returned.
 */
static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
{
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err)
		return err;

	err = mcp251xfd_vdd_enable(priv);
	if (err)
		clk_disable_unprepare(priv->clk);

	/* Wait for the oscillator stabilization time after power up. */
	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
		     2 * MCP251XFD_OSC_STAB_SLEEP_US);

	return err;
}
169
170static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
171{
172 int err;
173
174 err = mcp251xfd_vdd_disable(priv);
175 if (err)
176 return err;
177
178 clk_disable_unprepare(priv->clk);
179
180 return 0;
181}
182
/* Prepare a SPI "write register" command for @reg/@mask/@val into
 * @write_reg_buf.
 *
 * Only the bytes of the register covered by @mask are transferred: the
 * write starts at the first set byte of @mask and spans up to the last
 * set byte. If the chip requires CRC protected register writes
 * (MCP251XFD_QUIRK_CRC_REG), a CRC over command and data is appended.
 *
 * Returns the total number of bytes to transfer over SPI.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Transfer only the bytes selected by @mask. */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	/* Shift the value so its first relevant byte comes first and
	 * store it little endian.
	 */
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* The CRC is computed over the SPI command and the data. */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length: command + data + CRC. */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
218
219static inline int
220mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
221 u8 *tef_tail)
222{
223 u32 tef_ua;
224 int err;
225
226 err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
227 if (err)
228 return err;
229
230 *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
231
232 return 0;
233}
234
235static inline int
236mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
237 u8 *tx_tail)
238{
239 u32 fifo_sta;
240 int err;
241
242 err = regmap_read(priv->map_reg,
243 MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
244 &fifo_sta);
245 if (err)
246 return err;
247
248 *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
249
250 return 0;
251}
252
253static inline int
254mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
255 const struct mcp251xfd_rx_ring *ring,
256 u8 *rx_head)
257{
258 u32 fifo_sta;
259 int err;
260
261 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
262 &fifo_sta);
263 if (err)
264 return err;
265
266 *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
267
268 return 0;
269}
270
271static inline int
272mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
273 const struct mcp251xfd_rx_ring *ring,
274 u8 *rx_tail)
275{
276 u32 fifo_ua;
277 int err;
278
279 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
280 &fifo_ua);
281 if (err)
282 return err;
283
284 fifo_ua -= ring->base - MCP251XFD_RAM_START;
285 *rx_tail = fifo_ua / ring->obj_size;
286
287 return 0;
288}
289
/* Pre-build the static parts of one TX object's SPI message: transfer 0
 * carries the "write object data" command (CRC variant if the chip
 * needs it), transfer 1 carries the shared buffer that requests the
 * transmission (prepared by the caller, @rts_buf_len bytes long).
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load: SPI write command for object n's address in RAM. */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* NOTE(review): presumably set per-frame in the TX path -- confirm */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request-to-send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
325
/* Reset all ring state (TEF, TX, RX) and pre-build the UINC SPI
 * transfers that acknowledge consumed objects to the chip.
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	struct spi_transfer *xfer;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	netdev_reset_queue(priv->ndev);

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* UINC: FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* "cs_change == 1" on the last transfer results in an active
	 * chip select after the complete SPI message. This causes the
	 * controller to interpret the next register access as data.
	 * Set "cs_change" of the last transfer (xfer still points at
	 * it after the loop) to "0" to properly deactivate the chip
	 * select at the end of the message.
	 */
	xfer->cs_change = 0;

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request-to-send: shared by all TX objects */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* The first RX ring directly follows the TX objects in
		 * chip RAM, subsequent rings follow their predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* UINC: FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}

		/* Same as for the TEF above: deassert the chip select
		 * at the end of the message by clearing "cs_change" on
		 * the last transfer of this ring.
		 */
		xfer->cs_change = 0;
	}
}
427
428static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
429{
430 int i;
431
432 for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
433 kfree(priv->rx[i]);
434 priv->rx[i] = NULL;
435 }
436}
437
/* Size the TEF/TX/RX rings to fit the chip's on-board RAM and allocate
 * the RX rings.
 *
 * Returns 0 on success or -ENOMEM if an RX ring allocation fails; in
 * that case all previously allocated RX rings are freed again.
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* Listen-only mode is treated like FD mode here, matching the
	 * payload size setup in mcp251xfd_chip_rx_fifo_init_one() --
	 * NOTE(review): presumably FD frames can be received in
	 * listen-only mode, too; confirm against the datasheet.
	 */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* RAM left after reserving the TEF and TX objects (one TEF
	 * object per TX object).
	 */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	/* Fill the remaining RAM with RX rings: each ring gets the
	 * largest power-of-two number of objects that still fits,
	 * capped at MCP251XFD_RX_OBJ_NUM_MAX.
	 */
	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
506
507static inline int
508mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
509{
510 u32 val;
511 int err;
512
513 err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
514 if (err)
515 return err;
516
517 *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
518
519 return 0;
520}
521
/* Request Operation Mode @mode_req via the REQOP field of CON.
 *
 * Unless @nowait is set, or the requested mode is Sleep, poll the
 * OPMOD field until the chip reports the new mode; on timeout log the
 * requested vs. actual mode and return the error. (NOTE(review): Sleep
 * is excluded from polling -- presumably the chip no longer answers
 * register reads once asleep; confirm.)
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
555
556static inline int
557mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
558 const u8 mode_req)
559{
560 return __mcp251xfd_chip_set_mode(priv, mode_req, false);
561}
562
563static inline int __maybe_unused
564mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
565 const u8 mode_req)
566{
567 return __mcp251xfd_chip_set_mode(priv, mode_req, true);
568}
569
570static inline bool mcp251xfd_osc_invalid(u32 reg)
571{
572 return reg == 0x0 || reg == 0xffffffff;
573}
574
/* Bring the chip's oscillator up and wait for it to become ready.
 *
 * Returns -ENODEV if the OSC register reads as all zeros/ones (no chip
 * responding), -ETIMEDOUT if the oscillator doesn't become ready in
 * time, 0 on success.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set the Clock Output Divisor to /10 and expect "Oscillator
	 * Ready" with the PLL ready bit cleared.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for the oscillator (but not the PLL) to signal ready. */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	if (mcp251xfd_osc_invalid(osc)) {
		/* All zeros/ones: chip not detected on the bus. */
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	}

	return err;
}
617
/* Issue a SPI soft reset command.
 *
 * The clock is enabled and the chip is switched into Configuration
 * Mode first -- NOTE(review): presumably a precondition for a reliable
 * reset; confirm against the datasheet.
 */
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
{
	const __be16 cmd = mcp251xfd_cmd_reset();
	int err;

	err = mcp251xfd_chip_clock_enable(priv);
	if (err)
		return err;

	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
	if (err)
		return err;

	/* Send the 2 byte reset command, nothing is read back. */
	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
}
637
/* Verify that the soft reset took effect: the chip must be in
 * Configuration Mode and the OSC register must hold its post-reset
 * value (Oscillator Ready, Clock Output Divisor /10).
 *
 * Returns -ETIMEDOUT on mismatch so that the caller retries the reset.
 */
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference;
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
		netdev_info(priv->ndev,
			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
			    mcp251xfd_get_mode_str(mode), mode);
		return -ETIMEDOUT;
	}

	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);

	/* Check that the OSC register matches the expected reset state. */
	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc != osc_reference) {
		netdev_info(priv->ndev,
			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
			    osc, osc_reference);
		return -ETIMEDOUT;
	}

	return 0;
}
673
/* Soft reset the chip, retrying up to MCP251XFD_SOFTRESET_RETRIES_MAX
 * times on -ETIMEDOUT. Any other error aborts immediately; the last
 * error is returned when all retries are exhausted.
 */
static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
{
	int err, i;

	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
		if (i)
			netdev_info(priv->ndev,
				    "Retrying to reset controller.\n");

		err = mcp251xfd_chip_softreset_do(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		err = mcp251xfd_chip_softreset_check(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		return 0;
	}

	return err;
}
700
/* Runtime clock configuration after reset.
 *
 * Keeps the /10 clock output divisor and enables Low Power Mode
 * (LPMEN), then enables the free-running time base counter (TBCEN)
 * whose values are used as frame timestamps (see the ->ts handling in
 * the TEF path).
 */
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
	u32 osc;
	int err;

	osc = MCP251XFD_REG_OSC_LPMEN |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Enable the time base counter for RX/TEF timestamping. */
	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
			    MCP251XFD_REG_TSCON_TBCEN);
}
725
/* Write the global CAN configuration and the bit timing registers.
 *
 * Configures CON (keeping the chip in Configuration Mode via REQOP),
 * the nominal bit timing (NBTCFG) and -- in FD mode -- the data phase
 * bit timing (DBTCFG) and transmitter delay compensation (TDC).
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register:
	 * - REQOP: stay in Configuration Mode while reconfiguring
	 * - STEF: store transmitted frames in the TX Event FIFO
	 * - ISOCRCEN is set below unless non-ISO FD is requested
	 * - ESIGM, RTXAT, WFT (T11 filter), WAKFIL, PXEDIS:
	 *   NOTE(review): semantics taken from the register bit names;
	 *   confirm against the MCP251xFD family datasheet.
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time Configuration (registers hold value - 1). */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time Configuration (FD only). */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation, clamped to the TDCO range. */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
803
/* Configure the chip's GPIO pins for the dedicated RX interrupt line,
 * if one is used (priv->rx_int).
 *
 * Sets PM0 plus both TRIS bits in IOCON -- NOTE(review): per the bit
 * names this makes pin 0 a GPIO input and leaves pin 1 as input for
 * the RX interrupt function; confirm against the datasheet's IOCON
 * description.
 */
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
		MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
824
/* Undo mcp251xfd_chip_rx_int_enable(): set both pins to plain GPIO
 * (PM1 | PM0) inputs (TRIS1 | TRIS0), detaching the RX interrupt
 * function. No-op if no RX interrupt pin is used.
 */
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
840
/* Configure one RX FIFO: depth (FSIZE holds obj_num - 1), RX
 * timestamping, the overflow and "FIFO not empty" interrupts, and the
 * payload size -- 64 bytes in FD or listen-only mode, 8 bytes for
 * classical CAN.
 */
static int
mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring)
{
	u32 fifo_con;

	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			      ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_RXTSEN |
		MCP251XFD_REG_FIFOCON_RXOVIE |
		MCP251XFD_REG_FIFOCON_TFNRFNIE;

	/* Must match the object sizing in mcp251xfd_ring_alloc(). */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_8);

	return regmap_write(priv->map_reg,
			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
}
869
/* Enable acceptance filter @ring->nr and route its matches into the
 * ring's FIFO. FLTCON packs four filters per register, hence the
 * ring->nr >> 2 register index and the per-filter field mask.
 */
static int
mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
				  const struct mcp251xfd_rx_ring *ring)
{
	u32 fltcon;

	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);

	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
				  fltcon);
}
884
/* Program the chip's FIFO configuration: the TX Event FIFO (TEF), the
 * TX FIFO, and every RX FIFO with its acceptance filter.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF: one entry per TX object, timestamping plus overflow and
	 * "not empty" interrupts.
	 */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* TX FIFO: payload size matches mcp251xfd_ring_alloc(); the
	 * attempt count depends on the one-shot ctrlmode.
	 */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs and their filters. */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
942
/* Set up RAM ECC: enable it if the chip supports it (ECC quirk),
 * otherwise make sure it's off, then zero-fill the complete chip RAM
 * so every word holds a defined value (and thus a consistent ECC
 * checksum). Also clears the driver's cached ECC error state.
 */
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	void *ram;
	u32 val = 0;
	int err;

	ecc->ecc_stat = 0;

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
		val = MCP251XFD_REG_ECCCON_ECCEN;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 MCP251XFD_REG_ECCCON_ECCEN, val);
	if (err)
		return err;

	/* Initialize the RAM with zeros. */
	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
	if (!ram)
		return -ENOMEM;

	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
			       MCP251XFD_RAM_SIZE);
	kfree(ram);

	return err;
}
970
971static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
972{
973 struct mcp251xfd_ecc *ecc = &priv->ecc;
974
975 ecc->ecc_stat = 0;
976}
977
978static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
979{
980 u8 mode;
981
982
983 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
984 mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
985 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
986 mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
987 else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
988 mode = MCP251XFD_REG_CON_MODE_MIXED;
989 else
990 mode = MCP251XFD_REG_CON_MODE_CAN2_0;
991
992 return mode;
993}
994
995static int
996__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
997 bool nowait)
998{
999 u8 mode;
1000
1001 mode = mcp251xfd_get_normal_mode(priv);
1002
1003 return __mcp251xfd_chip_set_mode(priv, mode, nowait);
1004}
1005
1006static inline int
1007mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
1008{
1009 return __mcp251xfd_chip_set_normal_mode(priv, false);
1010}
1011
1012static inline int
1013mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
1014{
1015 return __mcp251xfd_chip_set_normal_mode(priv, true);
1016}
1017
/* Enable all interrupt sources the driver handles: SPI CRC errors,
 * ECC single/double bit errors, and the main sources in the INT
 * register. Bus error reporting (IVMIE) is only enabled when requested
 * via CAN_CTRLMODE_BERR_REPORTING.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
1048
/* Disable all interrupt sources: clear INT, the ECC interrupt enables
 * and the CRC interrupt enables (reverse of
 * mcp251xfd_chip_interrupts_enable()).
 */
static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
{
	int err;
	u32 mask;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
	if (err)
		return err;

	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 mask, 0x0);
	if (err)
		return err;

	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
1066
/* Stop the chip: record @state in priv->can.state, disable all
 * interrupts and the RX interrupt pin, then put the controller into
 * Sleep mode. Errors from the disable steps are ignored; the return
 * value is that of the final mode switch.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1076
1077static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1078{
1079 int err;
1080
1081 err = mcp251xfd_chip_softreset(priv);
1082 if (err)
1083 goto out_chip_stop;
1084
1085 err = mcp251xfd_chip_clock_init(priv);
1086 if (err)
1087 goto out_chip_stop;
1088
1089 err = mcp251xfd_set_bittiming(priv);
1090 if (err)
1091 goto out_chip_stop;
1092
1093 err = mcp251xfd_chip_rx_int_enable(priv);
1094 if (err)
1095 return err;
1096
1097 err = mcp251xfd_chip_ecc_init(priv);
1098 if (err)
1099 goto out_chip_stop;
1100
1101 mcp251xfd_ring_init(priv);
1102
1103 err = mcp251xfd_chip_fifo_init(priv);
1104 if (err)
1105 goto out_chip_stop;
1106
1107 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1108
1109 err = mcp251xfd_chip_set_normal_mode(priv);
1110 if (err)
1111 goto out_chip_stop;
1112
1113 return 0;
1114
1115 out_chip_stop:
1116 mcp251xfd_dump(priv);
1117 mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1118
1119 return err;
1120}
1121
/* can_priv::do_set_mode callback: restart the controller on
 * CAN_MODE_START (chip start, interrupts on, wake the TX queue); all
 * other modes are rejected with -EOPNOTSUPP.
 */
static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = mcp251xfd_chip_start(priv);
		if (err)
			return err;

		err = mcp251xfd_chip_interrupts_enable(priv);
		if (err) {
			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
			return err;
		}

		netif_wake_queue(ndev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1148
1149static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1150 struct can_berr_counter *bec)
1151{
1152 const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1153 u32 trec;
1154 int err;
1155
1156 err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1157 if (err)
1158 return err;
1159
1160 if (trec & MCP251XFD_REG_TREC_TXBO)
1161 bec->txerr = 256;
1162 else
1163 bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1164 bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1165
1166 return 0;
1167}
1168
/* can_priv::do_get_berr_counter callback.
 *
 * Avoid touching the hardware while the interface is down. In Bus Off
 * the counters cached in priv->bec are returned instead of reading
 * the chip -- NOTE(review): the cache is presumably filled by the bus
 * off handling elsewhere in this file; confirm.
 */
static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
				      struct can_berr_counter *bec)
{
	const struct mcp251xfd_priv *priv = netdev_priv(ndev);

	/* Interface is down: nothing to read, report zeroed counters. */
	if (!(ndev->flags & IFF_UP))
		return 0;

	/* In Bus Off, return the cached counters. */
	if (priv->can.state == CAN_STATE_BUS_OFF) {
		*bec = priv->bec;
		return 0;
	}

	return __mcp251xfd_get_berr_counter(ndev, bec);
}
1188
/* Sanity check (compiled in only with CONFIG_CAN_MCP251XFD_SANITY):
 * compare the driver's TEF tail with the chip's; -EILSEQ on mismatch.
 */
static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
{
	u8 tef_tail_chip, tef_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	if (tef_tail_chip != tef_tail) {
		netdev_err(priv->ndev,
			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
			   tef_tail_chip, tef_tail);
		return -EILSEQ;
	}

	return 0;
}
1211
/* Sanity check (compiled in only with CONFIG_CAN_MCP251XFD_SANITY):
 * compare the driver's RX tail for @ring with the chip's; -EILSEQ on
 * mismatch.
 */
static int
mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
			const struct mcp251xfd_rx_ring *ring)
{
	u8 rx_tail_chip, rx_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
	if (err)
		return err;

	rx_tail = mcp251xfd_get_rx_tail(ring);
	if (rx_tail_chip != rx_tail) {
		netdev_err(priv->ndev,
			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
			   rx_tail_chip, rx_tail);
		return -EILSEQ;
	}

	return 0;
}
1236
/* Recovery path when a TEF object's sequence number doesn't match the
 * expected tail: a set TEFOVIF means the TX Event FIFO really
 * overflowed (-ENOBUFS); otherwise log the TEF state and return
 * -EAGAIN so the caller can retry -- NOTE(review): retry semantics of
 * -EAGAIN depend on the caller, which is outside this chunk.
 */
static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	u32 tef_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
	if (err)
		return err;

	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
		netdev_err(priv->ndev,
			   "Transmit Event FIFO buffer overflow.\n");
		return -ENOBUFS;
	}

	netdev_info(priv->ndev,
		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
		    "not empty" : "empty",
		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);

	return -EAGAIN;
}
1264
/* Handle a single TEF object: verify its sequence number against the
 * driver's TEF tail, complete the matching echo skb (with hardware
 * timestamp), update TX stats and advance the tail.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
			   unsigned int *frame_len_ptr)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	u32 seq, seq_masked, tef_tail_masked, tef_tail;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the narrower MCP2517FD sequence mask for the comparison
	 * on both chips: comparing only those low bits is sufficient
	 * to detect stale (not-yet-completed) TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef->tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	tef_tail = mcp251xfd_get_tef_tail(priv);
	skb = priv->can.echo_skb[tef_tail];
	if (skb)
		mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    tef_tail, hw_tef_obj->ts,
					    frame_len_ptr);
	stats->tx_packets++;
	priv->tef->tail++;

	return 0;
}
1301
/* Update the TEF head from the chip's TX FIFO tail index.
 *
 * The chip reports its TX tail modulo the ring size, while
 * priv->tef->head is a free-running counter. Rebase the chip index
 * onto the free-running counter (handling wrap-around), then clamp to
 * the TX head: the TEF can never be ahead of what was queued for TX.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail is modulo tx_ring->obj_num; rebase it onto the
	 * free-running head counter. If the result doesn't advance the
	 * head, the chip index has wrapped around.
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* The TEF head can never pass the number of queued TX objects. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1325
1326static inline int
1327mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1328 struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1329 const u8 offset, const u8 len)
1330{
1331 const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1332 const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1333
1334 if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1335 (offset > tx_ring->obj_num ||
1336 len > tx_ring->obj_num ||
1337 offset + len > tx_ring->obj_num)) {
1338 netdev_err(priv->ndev,
1339 "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1340 tx_ring->obj_num, offset, len);
1341 return -ERANGE;
1342 }
1343
1344 return regmap_bulk_read(priv->map_rx,
1345 mcp251xfd_get_tef_obj_addr(offset),
1346 hw_tef_obj,
1347 sizeof(*hw_tef_obj) / val_bytes * len);
1348}
1349
/* Handle the Transmit Event FIFO interrupt.
 *
 * Reads all pending TEF objects (honouring ring wrap-around),
 * completes the corresponding echo skbs, increments the chip's TEF
 * FIFO tail pointer, and wakes the TX queue if space became
 * available.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	unsigned int total_frame_len = 0;
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	/* The TEF is a ring buffer: if the pending objects wrap
	 * around, read the remainder starting at index 0.
	 */
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		unsigned int frame_len = 0;

		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
		/* -EAGAIN means the sequence number in the TEF object
		 * doesn't match our tef_tail: finish the 'i' objects
		 * processed so far and bail out; the rest is retried
		 * on the next interrupt.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;

		total_frame_len += frame_len;
	}

 out_netif_wake_queue:
	len = i;	/* number of successfully processed TEF objects */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		int offset;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 *
		 * NOTE(review): the offset is chosen so that the
		 * transfer ends on the LAST element of uinc_xfer --
		 * presumably that element terminates the SPI
		 * transaction (deactivates chip select); confirm
		 * against the ring setup code.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		tx_ring->tail += len;
		netdev_completed_queue(priv->ndev, len, total_frame_len);

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure everything is read before waking the
		 * queue; pairs with the smp_mb() in
		 * mcp251xfd_tx_busy().
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1433
1434static int
1435mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
1436 struct mcp251xfd_rx_ring *ring)
1437{
1438 u32 new_head;
1439 u8 chip_rx_head;
1440 int err;
1441
1442 err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
1443 if (err)
1444 return err;
1445
1446
1447
1448
1449 new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
1450 if (new_head <= ring->head)
1451 new_head += ring->obj_num;
1452
1453 ring->head = new_head;
1454
1455 return mcp251xfd_check_rx_tail(priv, ring);
1456}
1457
1458static void
1459mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
1460 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1461 struct sk_buff *skb)
1462{
1463 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1464 u8 dlc;
1465
1466 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
1467 u32 sid, eid;
1468
1469 eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1470 sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1471
1472 cfd->can_id = CAN_EFF_FLAG |
1473 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1474 FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1475 } else {
1476 cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1477 hw_rx_obj->id);
1478 }
1479
1480 dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
1481
1482
1483 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1484
1485 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1486 cfd->flags |= CANFD_ESI;
1487
1488 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1489 cfd->flags |= CANFD_BRS;
1490
1491 cfd->len = can_fd_dlc2len(dlc);
1492 } else {
1493 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1494 cfd->can_id |= CAN_RTR_FLAG;
1495
1496 can_frame_set_cc_len((struct can_frame *)cfd, dlc,
1497 priv->can.ctrlmode);
1498 }
1499
1500 if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
1501 memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1502
1503 mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
1504}
1505
1506static int
1507mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1508 struct mcp251xfd_rx_ring *ring,
1509 const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1510{
1511 struct net_device_stats *stats = &priv->ndev->stats;
1512 struct sk_buff *skb;
1513 struct canfd_frame *cfd;
1514 int err;
1515
1516 if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1517 skb = alloc_canfd_skb(priv->ndev, &cfd);
1518 else
1519 skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1520
1521 if (!skb) {
1522 stats->rx_dropped++;
1523 return 0;
1524 }
1525
1526 mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1527 err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1528 if (err)
1529 stats->rx_fifo_errors++;
1530
1531 return 0;
1532}
1533
1534static inline int
1535mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1536 const struct mcp251xfd_rx_ring *ring,
1537 struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1538 const u8 offset, const u8 len)
1539{
1540 const int val_bytes = regmap_get_val_bytes(priv->map_rx);
1541 int err;
1542
1543 err = regmap_bulk_read(priv->map_rx,
1544 mcp251xfd_get_rx_obj_addr(ring, offset),
1545 hw_rx_obj,
1546 len * ring->obj_size / val_bytes);
1547
1548 return err;
1549}
1550
/* Drain all pending RX objects from one RX ring.
 *
 * Updates the ring head from the chip, then repeatedly reads a
 * linear (non-wrapping) chunk of RX objects, hands each one to the
 * rx-offload layer and increments the chip's RX FIFO tail pointer.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* Process the ring in linear chunks; a wrapped range is
	 * handled by the next loop iteration.
	 */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		int offset;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * NOTE(review): the offset is chosen so the transfer
		 * ends on the LAST element of uinc_xfer -- presumably
		 * that element terminates the SPI transaction
		 * (deactivates chip select); confirm against ring
		 * setup.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}
1601
1602static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1603{
1604 struct mcp251xfd_rx_ring *ring;
1605 int err, n;
1606
1607 mcp251xfd_for_each_rx_ring(priv, ring, n) {
1608 err = mcp251xfd_handle_rxif_ring(priv, ring);
1609 if (err)
1610 return err;
1611 }
1612
1613 return 0;
1614}
1615
1616static struct sk_buff *
1617mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
1618 struct can_frame **cf, u32 *timestamp)
1619{
1620 struct sk_buff *skb;
1621 int err;
1622
1623 err = mcp251xfd_get_timestamp(priv, timestamp);
1624 if (err)
1625 return NULL;
1626
1627 skb = alloc_can_err_skb(priv->ndev, cf);
1628 if (skb)
1629 mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
1630
1631 return skb;
1632}
1633
/* Handle the RX FIFO overflow interrupt.
 *
 * Identifies the overflowed FIFO(s) via RXOVIF, clears their
 * per-FIFO overflow flags, and reports the overflow to user space
 * with a CAN error frame.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If a System Error is pending as well, attribute the
		 * overflow to the MAB; otherwise it is a plain FIFO
		 * overflow.
		 */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Clear this FIFO's overflow interrupt flag. */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	/* Best effort: if no error skb can be allocated, the overflow
	 * is still accounted in the statistics above.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1685
/* TX attempt interrupt: currently not acted upon, only logged. */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1692
/* Handle the Invalid Message interrupt (bus errors).
 *
 * Reads and clears the BDIAG1 diagnostic register, maps the
 * individual diagnostic bits to Linux CAN error frame fields and
 * queues an error frame (if one could be allocated).
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Clear all error bits; only the snapshot read above is
	 * evaluated below.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	/* cf stays NULL if the allocation fails; statistics are still
	 * updated below, only the error frame is skipped.
	 */
	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* A DLC mismatch is unexpected -- NOTE(review): presumably the
	 * FIFO payload size is configured large enough; confirm
	 * against the ring/FIFO setup.
	 */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1790
/* Handle CAN controller error state changes.
 *
 * Derives the new TX/RX error states from the TREC register, reports
 * a state change via an error frame, and handles the bus-off case by
 * stopping the chip.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	/* The overall state is the worse of the TX and RX states. */
	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* can_change_state() is called unconditionally, even if the
	 * skb allocation failed (cf == NULL).
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* Read and cache the error counters (priv->bec) before
		 * stopping the chip -- NOTE(review): presumably the
		 * counters can't be read from hardware once the chip
		 * is stopped and the cached value is reported while in
		 * bus-off; confirm against the berr_counter
		 * implementation.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	/* Outside of bus-off report the live error counters in the
	 * error frame.
	 */
	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1863
/* Handle the Mode Change interrupt.
 *
 * Verifies the controller is in the expected (normal) mode. If not,
 * switches it back -- either immediately, or deferred to the ECC
 * handler via *set_normal_mode when an ECC interrupt is pending too.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	/* Chip is in the expected mode: nothing to do. */
	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* On chips with the MAB_NO_WARN quirk a change into Restricted
	 * Operation or Listen Only mode is only logged at debug level;
	 * any other unexpected mode change is an error.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* If an ECC interrupt is pending, let the ECC handler switch
	 * the chip back to normal mode after it has done its work.
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1918
/* Handle the System Error interrupt.
 *
 * The concrete cause is inferred from the other interrupt flags
 * latched in priv->regs_status.intf at the time the SERRIF fired;
 * statistics are updated accordingly.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * Recognized by a System Error together with a mode change
	 * plus IVM interrupt, or together with a pending ECC
	 * interrupt / a previously counted ECC error. Account it as
	 * an aborted TX.
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		/* On chips with the MAB_NO_WARN quirk this condition
		 * is logged at debug level only.
		 */
		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * Recognized by a System Error together with a pending RX
	 * overflow or RX interrupt; the received message is counted
	 * as dropped.
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1996
/* Try to recover from a repeated ECC error in TX-RAM.
 *
 * Cross-checks the ECC error location against the chip's TX tail
 * and, if consistent, re-writes the affected TX object to TX-RAM
 * (overwriting the corrupted data) and puts the chip back into
 * normal mode.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* The error must concern the TX object at the chip's TX tail
	 * (offset 0), or -- on the MCP2518FD only -- the one right
	 * after it (offset 1). Anything else is inconsistent and not
	 * recoverable here.
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* Re-write the complete TX object to repair the corrupted RAM. */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* NOTE(review): presumably the chip left normal mode due to
	 * the ECC error; restore it here -- confirm against the mode
	 * change handler.
	 */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2045
/* Handle the ECC interrupt.
 *
 * Reads and clears ECCSTAT, logs the single/double bit error and --
 * if the error hit TX-RAM and repeated MCP251XFD_ECC_CNT_MAX times --
 * triggers recovery. When the mode change handler deferred the
 * switch back to normal mode (@set_normal_mode), perform it here.
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Clear only the interrupt flag bits that were set. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check whether the error address falls into the TX-RAM area. */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Classify: single (corrected) vs. double (uncorrected) bit
	 * error. ECCSTAT without either flag is unexpected.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		/* Errors outside TX-RAM are only reported, not
		 * recovered; reset the cached state.
		 */
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Count repetitions of the identical error; recover
		 * once the threshold is reached.
		 */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Deferred from the mode change handler (see
	 * mcp251xfd_handle_modif()).
	 */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2121
2122static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2123{
2124 int err;
2125 u32 crc;
2126
2127 err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2128 if (err)
2129 return err;
2130
2131 err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2132 MCP251XFD_REG_CRC_IF_MASK,
2133 ~crc);
2134 if (err)
2135 return err;
2136
2137 if (crc & MCP251XFD_REG_CRC_FERRIF)
2138 netdev_notice(priv->ndev, "CRC write command format error.\n");
2139 else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2140 netdev_notice(priv->ndev,
2141 "CRC write error detected. CRC=0x%04lx.\n",
2142 FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2143
2144 return 0;
2145}
2146
/* Invoke the mcp251xfd_handle_<irq>() IRQ sub-handler, logging an
 * error message with the handler's name if it fails. The statement
 * expression evaluates to the handler's return value. Note the inner
 * 'err' deliberately shadows any caller-scope 'err'.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			   "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			   __stringify(irq), err); \
	err; \
})
2159
/* Threaded IRQ handler.
 *
 * First drains the RX FIFOs as long as the optional RX_INT line is
 * active (no status register read needed), then loops reading the
 * interrupt status registers and dispatching to the individual
 * mcp251xfd_handle_*() routines until no enabled interrupt remains
 * pending. On any sub-handler failure the chip's interrupts are
 * disabled and a register dump is produced.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	const int val_bytes = regmap_get_val_bytes(priv->map_reg);
	irqreturn_t handled = IRQ_NONE;
	int err;

	/* With an RX_INT GPIO, RX can be serviced without touching the
	 * interrupt status registers first.
	 */
	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Read INT and the adjacent status registers in one
		 * bulk transfer into priv->regs_status.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       val_bytes);
		if (err)
			goto out_fail;

		/* Only act on interrupts that are both flagged and
		 * enabled.
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending)) {
			can_rx_offload_threaded_irq_finish(&priv->offload);
			return handled;
		}

		/* Some interrupt flags must be acknowledged in the INT
		 * register by writing them back as 0; do this before
		 * running the sub-handlers.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* Run the error state handler also when the state is
		 * already worse than error-active, even without a
		 * pending CERRIF -- NOTE(review): presumably some
		 * state transitions don't raise CERRIF; confirm
		 * against the chip errata/datasheet.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In bus-off the chip has been stopped by the
			 * cerrif handler; return immediately instead
			 * of polling the status registers again.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	can_rx_offload_threaded_irq_finish(&priv->offload);

	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_dump(priv);
	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_timestamp_stop(priv);

	return handled;
}
2311
2312static inline struct
2313mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2314{
2315 u8 tx_head;
2316
2317 tx_head = mcp251xfd_get_tx_head(tx_ring);
2318
2319 return &tx_ring->obj[tx_head];
2320}
2321
/* Convert an outgoing skb into the chip's TX object format.
 *
 * Fills @tx_obj's preallocated load buffer (ID, flags, payload,
 * optional padding and CRC trailer) and sets the final SPI transfer
 * length. @seq is stored in the object's sequence field so TX
 * completions can be matched in the TEF handler.
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	/* CAN ID: extended (29 bit) vs. standard (11 bit) format. */
	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* The MCP2518FD-wide sequence field is used on all models --
	 * NOTE(review): presumably the MCP2517FD only transfers the
	 * lower bits into the TEF; the TEF handler compares with the
	 * MCP2517FD mask, confirm consistency there.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CAN FD specific flags (ESI, BRS, FDF) and DLC encoding. */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	/* The hardware TX object sits behind a CRC or no-CRC SPI
	 * command header, depending on the chip quirks.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy the payload. */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Zero the padding up to the sanitized CAN FD length. */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the chip's RAM, rounded
	 * up to full 32 bit words.
	 */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* The CRC covers the command header, too. */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* The total transfer length includes the CRC trailer. */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}
2425
/* Queue the pre-built SPI message that writes @tx_obj to the chip.
 * spi_async() is used, so the xmit path does not block on the
 * transfer.
 */
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}
2431
/* Check for room in the TX ring, stopping the netif queue if full.
 *
 * Returns true if the TX ring is full (caller should return
 * NETDEV_TX_BUSY), false otherwise. If space re-appears after
 * stopping the queue (concurrent TEF completion), the queue is
 * restarted.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before re-checking tx_free; pairs with the
	 * smp_mb() before netif_wake_queue() in the TEF handler.
	 */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}
2456
/* ndo_start_xmit: transmit path.
 *
 * Converts the skb into a TX object at the ring head, stores the skb
 * for echo on TX completion (handled in the TEF path) and queues the
 * asynchronous SPI transfer that writes the object to the chip.
 * Always returns NETDEV_TX_OK except when the ring is full
 * (NETDEV_TX_BUSY); a failed SPI submission is only logged.
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	/* The free-running ring head doubles as the sequence number. */
	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop the queue if this object fills the TX FIFO completely. */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2498
/* ndo_open: bring the interface up.
 *
 * Resumes the device via runtime PM, allocates the rings, enables
 * the transceiver, starts the chip, enables rx-offload, requests the
 * threaded IRQ and finally enables the chip's interrupts. The error
 * path unwinds everything in reverse order.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	mcp251xfd_timestamp_init(priv);
	can_rx_offload_enable(&priv->offload);

	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_SHARED | IRQF_ONESHOT,
				   dev_name(&spi->dev), priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	/* Put the chip into a defined (stopped) state before dropping
	 * the PM reference.
	 */
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2561
/* ndo_stop: tear down in the reverse order of mcp251xfd_open(). */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	/* Disable the chip's interrupts before freeing the IRQ. */
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_timestamp_stop(priv);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2580
/* Network device operations for the mcp251xfd CAN interface. */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit = mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2587
/* Derive additional quirks from the SPI controller: flag half-duplex
 * controllers so register access can be adapted accordingly.
 */
static void
mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
{
	const struct spi_device *spi = priv->spi;
	const struct spi_controller *ctlr = spi->controller;

	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
}
2597
/* Autodetect the chip model and fix up the devtype data.
 *
 * The OSC register's LPMEN bit is used for detection: attempting to
 * set it and reading it back distinguishes chips where it sticks
 * (MCP2518FD) from those where it doesn't (MCP2517FD). Afterwards
 * the quirks and the regmap are re-initialized for the detected
 * model.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* Try to set the Low Power Mode enable bit. */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Warn only when the firmware claimed a specific model that
	 * doesn't match; the generic "mcp251xfd" compatible is fixed
	 * up silently.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* Re-apply the controller-derived quirks on top of the
	 * detected model's quirks.
	 */
	mcp251xfd_register_quirks(priv);

	/* Re-init the regmap, as the quirks may have changed. */
	return mcp251xfd_regmap_init(priv);
}
2638
/* Sanity-check the optional RX-INT line.
 *
 * Directly after a softreset the RX-INT pin should be inactive. If it
 * reads active here, the pin is presumably miswired or misconfigured
 * (NOTE(review): confirm against board schematics), so RX-INT support
 * is disabled rather than relied upon. Returns 0 also in that case —
 * only register access failures are reported as errors.
 */
static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
{
	int err, rx_pending;

	if (!priv->rx_int)
		return 0;

	err = mcp251xfd_chip_rx_int_enable(priv);
	if (err)
		return err;

	/* Briefly enable the RX-INT function on the chip and sample
	 * the GPIO; it is expected to read inactive (0).
	 */
	rx_pending = gpiod_get_value_cansleep(priv->rx_int);

	err = mcp251xfd_chip_rx_int_disable(priv);
	if (err)
		return err;

	if (!rx_pending)
		return 0;

	netdev_info(priv->ndev,
		    "RX_INT active after softreset, disabling RX_INT support.");
	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
	priv->rx_int = NULL;

	return 0;
}
2669
2670static int
2671mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2672 u32 *dev_id, u32 *effective_speed_hz)
2673{
2674 struct mcp251xfd_map_buf_nocrc *buf_rx;
2675 struct mcp251xfd_map_buf_nocrc *buf_tx;
2676 struct spi_transfer xfer[2] = { };
2677 int err;
2678
2679 buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2680 if (!buf_rx)
2681 return -ENOMEM;
2682
2683 buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2684 if (!buf_tx) {
2685 err = -ENOMEM;
2686 goto out_kfree_buf_rx;
2687 }
2688
2689 xfer[0].tx_buf = buf_tx;
2690 xfer[0].len = sizeof(buf_tx->cmd);
2691 xfer[1].rx_buf = buf_rx->data;
2692 xfer[1].len = sizeof(dev_id);
2693
2694 mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2695 err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2696 if (err)
2697 goto out_kfree_buf_tx;
2698
2699 *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2700 *effective_speed_hz = xfer->effective_speed_hz;
2701
2702 out_kfree_buf_tx:
2703 kfree(buf_tx);
2704 out_kfree_buf_rx:
2705 kfree(buf_rx);
2706
2707 return 0;
2708}
2709
/* Expands to '+' if the given quirk is active in the detected devtype
 * data, '-' otherwise; expects a local "priv" in scope.
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')

/* Log a one-line registration summary: model, silicon revision, active
 * quirks, and the clock rates involved (CAN clock, original and
 * limited SPI max speed, and the effective SPI speed as measured on
 * the DEVID read).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2747
/* Bring the controller up far enough to identify it, register the CAN
 * netdev, and put the chip back to sleep.
 *
 * Clocks and supplies are switched on by hand first, then ownership is
 * handed to runtime-PM (pm_runtime_set_active() + pm_runtime_enable()),
 * so the final pm_runtime_put() lets runtime-PM power the chip down
 * again until the interface is opened.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * state is unwound on the error paths below.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means the chip did not answer at all: skip the
	 * attempt to put it into sleep mode on the error path.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put the controller into sleep mode. With CONFIG_PM enabled
	 * the following pm_runtime_put() presumably lets runtime-PM
	 * switch off the clocks and supplies; without it they stay
	 * powered (NOTE(review): confirm against the runtime-PM ops).
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2811
/* Counterpart of mcp251xfd_register(): unregister the netdev and power
 * the chip down while keeping runtime-PM's bookkeeping consistent.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;

	unregister_candev(ndev);

	/* Resume the device so clocks/supplies are on, drop the usage
	 * count without triggering idle, then switch clocks and
	 * supplies off by hand before disabling runtime-PM.
	 */
	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2823
/* Device tree match table; .data provides the devtype data used as the
 * starting point before the model is autodetected.
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2839
/* SPI device-ID table, mirroring the OF match table for probing by
 * plain SPI device name.
 */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2855
/* SPI probe: gather the resources described by firmware (IRQ, optional
 * RX-INT GPIO, optional VDD and transceiver regulators, the external
 * oscillator), allocate and populate the CAN netdev, set up the SPI
 * link and register the controller.
 *
 * Returns 0 on success or a negative errno.
 */
static int mcp251xfd_probe(struct spi_device *spi)
{
	const void *match;
	struct net_device *ndev;
	struct mcp251xfd_priv *priv;
	struct gpio_desc *rx_int;
	struct regulator *reg_vdd, *reg_xceiver;
	struct clk *clk;
	u32 freq = 0;
	int err;

	if (!spi->irq)
		return dev_err_probe(&spi->dev, -ENXIO,
				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");

	/* RX-INT GPIO is optional; absence yields NULL, not an error. */
	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
					 GPIOD_IN);
	if (IS_ERR(rx_int))
		return dev_err_probe(&spi->dev, PTR_ERR(rx_int),
				     "Failed to get RX-INT!\n");

	/* Both regulators are optional: -ENODEV means "not described
	 * by firmware" and is treated as "not present".
	 */
	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
	if (PTR_ERR(reg_vdd) == -ENODEV)
		reg_vdd = NULL;
	else if (IS_ERR(reg_vdd))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd),
				     "Failed to get VDD regulator!\n");

	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
	if (PTR_ERR(reg_xceiver) == -ENODEV)
		reg_xceiver = NULL;
	else if (IS_ERR(reg_xceiver))
		return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver),
				     "Failed to get Transceiver regulator!\n");

	/* The oscillator is described either by a clock provider or by
	 * a "clock-frequency" property.
	 */
	clk = devm_clk_get_optional(&spi->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&spi->dev, PTR_ERR(clk),
				     "Failed to get Oscillator (clock)!\n");
	if (clk) {
		freq = clk_get_rate(clk);
	} else {
		err = device_property_read_u32(&spi->dev, "clock-frequency",
					       &freq);
		if (err)
			return dev_err_probe(&spi->dev, err,
					     "Failed to get clock-frequency!\n");
	}

	/* Sanity check: the oscillator must be within the supported range. */
	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low or high.\n",
			freq);
		return -ERANGE;
	}

	/* The chip's PLL is not supported by this driver, so the
	 * oscillator must be fast enough on its own.
	 */
	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
		dev_err(&spi->dev,
			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
			freq);
		return -ERANGE;
	}

	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
			    MCP251XFD_TX_OBJ_NUM_MAX);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &spi->dev);

	ndev->netdev_ops = &mcp251xfd_netdev_ops;
	ndev->irq = spi->irq;
	/* The driver echoes sent frames back to the stack (IFF_ECHO). */
	ndev->flags |= IFF_ECHO;

	priv = netdev_priv(ndev);
	spi_set_drvdata(spi, priv);
	priv->can.clock.freq = freq;
	priv->can.do_set_mode = mcp251xfd_set_mode;
	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
		CAN_CTRLMODE_CC_LEN8_DLC;
	priv->ndev = ndev;
	priv->spi = spi;
	priv->rx_int = rx_int;
	priv->clk = clk;
	priv->reg_vdd = reg_vdd;
	priv->reg_xceiver = reg_xceiver;

	/* Pick the devtype data from the OF match when available,
	 * otherwise from the SPI device-ID table.
	 */
	match = device_get_match_data(&spi->dev);
	if (match)
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
	else
		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
			spi_get_device_id(spi)->driver_data;

	/* Limit the SPI clock to 85% of SYSCLOCK/2 (freq / 2 / 1000 *
	 * 850). NOTE(review): the datasheet presumably allows up to
	 * SYSCLOCK/2, but the controller is reportedly not stable at
	 * that rate — confirm against the datasheet and the upstream
	 * table of known-good/bad clock combinations.
	 *
	 * The original max_speed_hz is saved so it can be restored on
	 * remove and on the error path below.
	 */
	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
	spi->bits_per_word = 8;
	/* Run the SPI message queue at realtime priority. */
	spi->rt = true;
	err = spi_setup(spi);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_regmap_init(priv);
	if (err)
		goto out_free_candev;

	err = can_rx_offload_add_manual(ndev, &priv->offload,
					MCP251XFD_NAPI_WEIGHT);
	if (err)
		goto out_free_candev;

	err = mcp251xfd_register(priv);
	if (err)
		goto out_can_rx_offload_del;

	return 0;

 out_can_rx_offload_del:
	can_rx_offload_del(&priv->offload);
 out_free_candev:
	/* Restore the SPI speed limit we imposed above. */
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;

	free_candev(ndev);

	return err;
}
3015
/* SPI remove callback: undo probe in reverse order and restore the
 * original SPI max speed that probe limited.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
3028
3029static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
3030{
3031 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
3032
3033 return mcp251xfd_clks_and_vdd_disable(priv);
3034}
3035
3036static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
3037{
3038 const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
3039
3040 return mcp251xfd_clks_and_vdd_enable(priv);
3041}
3042
/* Only runtime-PM ops are provided: they toggle the external clock and
 * supply regulators; no system sleep ops are implemented here.
 */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
3047
/* SPI driver glue and module metadata. */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
MODULE_LICENSE("GPL v2");
3063