// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver */
11#include <linux/cache.h>
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/dma-mapping.h>
15#include <linux/err.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/if_vlan.h>
19#include <linux/kernel.h>
20#include <linux/list.h>
21#include <linux/module.h>
22#include <linux/net_tstamp.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/of_irq.h>
26#include <linux/of_mdio.h>
27#include <linux/of_net.h>
28#include <linux/pm_runtime.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/sys_soc.h>
32#include <linux/reset.h>
33
34#include <asm/div64.h>
35
36#include "ravb.h"
37
38#define RAVB_DEF_MSG_ENABLE \
39 (NETIF_MSG_LINK | \
40 NETIF_MSG_TIMER | \
41 NETIF_MSG_RX_ERR | \
42 NETIF_MSG_TX_ERR)
43
44static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
45 "ch0",
46 "ch1",
47};
48
49static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
50 "ch18",
51 "ch19",
52};
53
54void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
55 u32 set)
56{
57 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
58}
59
60int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
61{
62 int i;
63
64 for (i = 0; i < 10000; i++) {
65 if ((ravb_read(ndev, reg) & mask) == value)
66 return 0;
67 udelay(10);
68 }
69 return -ETIMEDOUT;
70}
71
72static int ravb_config(struct net_device *ndev)
73{
74 int error;
	/* Set config mode */
77 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	/* Check if the operating mode is changed to the config mode */
79 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
80 if (error)
81 netdev_err(ndev, "failed to switch device to config mode\n");
82
83 return error;
84}
85
86static void ravb_set_rate(struct net_device *ndev)
87{
88 struct ravb_private *priv = netdev_priv(ndev);
89
90 switch (priv->speed) {
91 case 100:
92 ravb_write(ndev, GECMR_SPEED_100, GECMR);
93 break;
94 case 1000:
95 ravb_write(ndev, GECMR_SPEED_1000, GECMR);
96 break;
97 }
98}
99
100static void ravb_set_buffer_align(struct sk_buff *skb)
101{
102 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
103
104 if (reserve)
105 skb_reserve(skb, RAVB_ALIGN - reserve);
106}
/* Get the MAC address from the device tree or, failing that, from the
 * MAHR/MALR registers programmed by a bootloader.
 *
 * The Ethernet AVB device has no ROM for the MAC address.
 */
113static void ravb_read_mac_address(struct device_node *np,
114 struct net_device *ndev)
115{
116 int ret;
117
118 ret = of_get_mac_address(np, ndev->dev_addr);
119 if (ret) {
120 u32 mahr = ravb_read(ndev, MAHR);
121 u32 malr = ravb_read(ndev, MALR);
122
123 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
124 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
125 ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
126 ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
127 ndev->dev_addr[4] = (malr >> 8) & 0xFF;
128 ndev->dev_addr[5] = (malr >> 0) & 0xFF;
129 }
130}
131
132static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
133{
134 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
135 mdiobb);
136
137 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
138}
/* MDC pin control */
141static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
142{
143 ravb_mdio_ctrl(ctrl, PIR_MDC, level);
144}
/* Data I/O pin control */
147static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
148{
149 ravb_mdio_ctrl(ctrl, PIR_MMD, output);
150}
/* Set data bit */
153static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
154{
155 ravb_mdio_ctrl(ctrl, PIR_MDO, value);
156}
/* Get data bit */
159static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
160{
161 struct ravb_private *priv = container_of(ctrl, struct ravb_private,
162 mdiobb);
163
164 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
165}
/* MDIO bus control struct */
168static const struct mdiobb_ops bb_ops = {
169 .owner = THIS_MODULE,
170 .set_mdc = ravb_set_mdc,
171 .set_mdio_dir = ravb_set_mdio_dir,
172 .set_mdio_data = ravb_set_mdio_data,
173 .get_mdio_data = ravb_get_mdio_data,
174};
/* Free transmitted skb's and reclaim TX descriptors */
177static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
178{
179 struct ravb_private *priv = netdev_priv(ndev);
180 struct net_device_stats *stats = &priv->stats[q];
181 unsigned int num_tx_desc = priv->num_tx_desc;
182 struct ravb_tx_desc *desc;
183 unsigned int entry;
184 int free_num = 0;
185 u32 size;
186
187 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
188 bool txed;
189
190 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
191 num_tx_desc);
192 desc = &priv->tx_ring[q][entry];
193 txed = desc->die_dt == DT_FEMPTY;
194 if (free_txed_only && !txed)
195 break;
		/* Descriptor type must be checked before all other reads */
197 dma_rmb();
198 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb */
200 if (priv->tx_skb[q][entry / num_tx_desc]) {
201 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
202 size, DMA_TO_DEVICE);
203
204 if (entry % num_tx_desc == num_tx_desc - 1) {
205 entry /= num_tx_desc;
206 dev_kfree_skb_any(priv->tx_skb[q][entry]);
207 priv->tx_skb[q][entry] = NULL;
208 if (txed)
209 stats->tx_packets++;
210 }
211 free_num++;
212 }
213 if (txed)
214 stats->tx_bytes += size;
215 desc->die_dt = DT_EEMPTY;
216 }
217 return free_num;
218}
219
220static void ravb_rx_ring_free(struct net_device *ndev, int q)
221{
222 struct ravb_private *priv = netdev_priv(ndev);
223 unsigned int ring_size;
224 unsigned int i;
225
226 if (!priv->rx_ring[q])
227 return;
228
229 for (i = 0; i < priv->num_rx_ring[q]; i++) {
230 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
231
232 if (!dma_mapping_error(ndev->dev.parent,
233 le32_to_cpu(desc->dptr)))
234 dma_unmap_single(ndev->dev.parent,
235 le32_to_cpu(desc->dptr),
236 RX_BUF_SZ,
237 DMA_FROM_DEVICE);
238 }
239 ring_size = sizeof(struct ravb_ex_rx_desc) *
240 (priv->num_rx_ring[q] + 1);
241 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
242 priv->rx_desc_dma[q]);
243 priv->rx_ring[q] = NULL;
244}
/* Free skb's and DMA buffers for Ethernet AVB */
247static void ravb_ring_free(struct net_device *ndev, int q)
248{
249 struct ravb_private *priv = netdev_priv(ndev);
250 const struct ravb_hw_info *info = priv->info;
251 unsigned int num_tx_desc = priv->num_tx_desc;
252 unsigned int ring_size;
253 unsigned int i;
254
255 info->rx_ring_free(ndev, q);
256
257 if (priv->tx_ring[q]) {
258 ravb_tx_free(ndev, q, false);
259
260 ring_size = sizeof(struct ravb_tx_desc) *
261 (priv->num_tx_ring[q] * num_tx_desc + 1);
262 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
263 priv->tx_desc_dma[q]);
264 priv->tx_ring[q] = NULL;
265 }
	/* Free RX skb ringbuffer */
268 if (priv->rx_skb[q]) {
269 for (i = 0; i < priv->num_rx_ring[q]; i++)
270 dev_kfree_skb(priv->rx_skb[q][i]);
271 }
272 kfree(priv->rx_skb[q]);
273 priv->rx_skb[q] = NULL;
	/* Free aligned TX buffers */
276 kfree(priv->tx_align[q]);
277 priv->tx_align[q] = NULL;
	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
282 kfree(priv->tx_skb[q]);
283 priv->tx_skb[q] = NULL;
284}
285
286static void ravb_rx_ring_format(struct net_device *ndev, int q)
287{
288 struct ravb_private *priv = netdev_priv(ndev);
289 struct ravb_ex_rx_desc *rx_desc;
290 unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
291 dma_addr_t dma_addr;
292 unsigned int i;
293
294 memset(priv->rx_ring[q], 0, rx_ring_size);
295
296 for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
298 rx_desc = &priv->rx_ring[q][i];
299 rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
300 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
301 RX_BUF_SZ,
302 DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
306 if (dma_mapping_error(ndev->dev.parent, dma_addr))
307 rx_desc->ds_cc = cpu_to_le16(0);
308 rx_desc->dptr = cpu_to_le32(dma_addr);
309 rx_desc->die_dt = DT_FEMPTY;
310 }
311 rx_desc = &priv->rx_ring[q][i];
312 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
313 rx_desc->die_dt = DT_LINKFIX;
314}
/* Format skb and descriptor buffer for Ethernet AVB */
317static void ravb_ring_format(struct net_device *ndev, int q)
318{
319 struct ravb_private *priv = netdev_priv(ndev);
320 const struct ravb_hw_info *info = priv->info;
321 unsigned int num_tx_desc = priv->num_tx_desc;
322 struct ravb_tx_desc *tx_desc;
323 struct ravb_desc *desc;
324 unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
325 num_tx_desc;
326 unsigned int i;
327
328 priv->cur_rx[q] = 0;
329 priv->cur_tx[q] = 0;
330 priv->dirty_rx[q] = 0;
331 priv->dirty_tx[q] = 0;
332
333 info->rx_ring_format(ndev, q);
334
335 memset(priv->tx_ring[q], 0, tx_ring_size);
336
337 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
338 i++, tx_desc++) {
339 tx_desc->die_dt = DT_EEMPTY;
340 if (num_tx_desc > 1) {
341 tx_desc++;
342 tx_desc->die_dt = DT_EEMPTY;
343 }
344 }
345 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
346 tx_desc->die_dt = DT_LINKFIX;
	/* RX descriptor base address for this queue */
349 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
350 desc->die_dt = DT_LINKFIX;
351 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	/* TX descriptor base address for this queue */
354 desc = &priv->desc_bat[q];
355 desc->die_dt = DT_LINKFIX;
356 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
357}
358
359static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
360{
361 struct ravb_private *priv = netdev_priv(ndev);
362 unsigned int ring_size;
363
364 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
365
366 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
367 &priv->rx_desc_dma[q],
368 GFP_KERNEL);
369 return priv->rx_ring[q];
370}
/* Init skb and descriptor buffer for Ethernet AVB */
373static int ravb_ring_init(struct net_device *ndev, int q)
374{
375 struct ravb_private *priv = netdev_priv(ndev);
376 const struct ravb_hw_info *info = priv->info;
377 unsigned int num_tx_desc = priv->num_tx_desc;
378 unsigned int ring_size;
379 struct sk_buff *skb;
380 unsigned int i;
	/* Allocate RX and TX skb rings */
383 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
384 sizeof(*priv->rx_skb[q]), GFP_KERNEL);
385 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
386 sizeof(*priv->tx_skb[q]), GFP_KERNEL);
387 if (!priv->rx_skb[q] || !priv->tx_skb[q])
388 goto error;
389
390 for (i = 0; i < priv->num_rx_ring[q]; i++) {
391 skb = netdev_alloc_skb(ndev, info->max_rx_len);
392 if (!skb)
393 goto error;
394 ravb_set_buffer_align(skb);
395 priv->rx_skb[q][i] = skb;
396 }
397
398 if (num_tx_desc > 1) {
		/* Allocate rings for the aligned buffers */
400 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
401 DPTR_ALIGN - 1, GFP_KERNEL);
402 if (!priv->tx_align[q])
403 goto error;
404 }
	/* Allocate all RX descriptors */
407 if (!info->alloc_rx_desc(ndev, q))
408 goto error;
409
410 priv->dirty_rx[q] = 0;
	/* Allocate all TX descriptors */
413 ring_size = sizeof(struct ravb_tx_desc) *
414 (priv->num_tx_ring[q] * num_tx_desc + 1);
415 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
416 &priv->tx_desc_dma[q],
417 GFP_KERNEL);
418 if (!priv->tx_ring[q])
419 goto error;
420
421 return 0;
422
423error:
424 ravb_ring_free(ndev, q);
425
426 return -ENOMEM;
427}
428
429static void ravb_rcar_emac_init(struct net_device *ndev)
430{
	/* Receive frame limit set register */
432 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
435 ravb_write(ndev, ECMR_ZPF | ECMR_DM |
436 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
437 ECMR_TE | ECMR_RE, ECMR);
438
439 ravb_set_rate(ndev);
	/* Set MAC address */
442 ravb_write(ndev,
443 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
444 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
445 ravb_write(ndev,
446 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
	/* E-MAC status register clear */
449 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
	/* E-MAC interrupt enable register */
452 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
453}
/* E-MAC init function */
456static void ravb_emac_init(struct net_device *ndev)
457{
458 struct ravb_private *priv = netdev_priv(ndev);
459 const struct ravb_hw_info *info = priv->info;
460
461 info->emac_init(ndev);
462}
463
464static void ravb_rcar_dmac_init(struct net_device *ndev)
465{
466 struct ravb_private *priv = netdev_priv(ndev);
467 const struct ravb_hw_info *info = priv->info;
	/* Set AVB RX */
470 ravb_write(ndev,
471 RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
	/* Set FIFO size */
474 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
	/* Timestamp enable */
477 ravb_write(ndev, TCCR_TFEN, TCCR);
	/* Interrupt init: */
480 if (info->multi_irqs) {
		/* Clear DIL.DPLx */
482 ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
484 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
485 }
	/* Frame receive */
487 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
489 ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
491 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
493 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
494}
/* Device init function for Ethernet AVB */
497static int ravb_dmac_init(struct net_device *ndev)
498{
499 struct ravb_private *priv = netdev_priv(ndev);
500 const struct ravb_hw_info *info = priv->info;
501 int error;
	/* Set CONFIG mode */
504 error = ravb_config(ndev);
505 if (error)
506 return error;
507
508 error = ravb_ring_init(ndev, RAVB_BE);
509 if (error)
510 return error;
511 error = ravb_ring_init(ndev, RAVB_NC);
512 if (error) {
513 ravb_ring_free(ndev, RAVB_BE);
514 return error;
515 }
	/* Descriptor format */
518 ravb_ring_format(ndev, RAVB_BE);
519 ravb_ring_format(ndev, RAVB_NC);
520
521 info->dmac_init(ndev);
	/* Setting the control will start the AVB-DMAC process. */
524 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
525
526 return 0;
527}
528
529static void ravb_get_tx_tstamp(struct net_device *ndev)
530{
531 struct ravb_private *priv = netdev_priv(ndev);
532 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
533 struct skb_shared_hwtstamps shhwtstamps;
534 struct sk_buff *skb;
535 struct timespec64 ts;
536 u16 tag, tfa_tag;
537 int count;
538 u32 tfa2;
539
540 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
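	/* TSR_TFFL is the fill level of the TX timestamp FIFO, i.e. how many
	 * queued timestamps are waiting to be read out below.
	 */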
541 while (count--) {
542 tfa2 = ravb_read(ndev, TFA2);
543 tfa_tag = (tfa2 & TFA2_TST) >> 16;
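		/* The seconds value is split across TFA1 (lower 32 bits) and
		 * TFA2 (upper bits); nanoseconds are read from TFA0.
		 */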
544 ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
545 ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
546 ravb_read(ndev, TFA1);
547 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
548 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
549 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
550 list) {
551 skb = ts_skb->skb;
552 tag = ts_skb->tag;
553 list_del(&ts_skb->list);
554 kfree(ts_skb);
555 if (tag == tfa_tag) {
556 skb_tstamp_tx(skb, &shhwtstamps);
557 dev_consume_skb_any(skb);
558 break;
559 } else {
560 dev_kfree_skb_any(skb);
561 }
562 }
563 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
564 }
565}
566
567static void ravb_rx_csum(struct sk_buff *skb)
568{
569 u8 *hw_csum;
	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
	 * appended to packet data.
	 */
574 if (unlikely(skb->len < sizeof(__sum16)))
575 return;
576 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
577 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
578 skb->ip_summed = CHECKSUM_COMPLETE;
579 skb_trim(skb, skb->len - sizeof(__sum16));
580}
581
582static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q)
583{
584 struct ravb_private *priv = netdev_priv(ndev);
585 const struct ravb_hw_info *info = priv->info;
586 int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
587 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
588 priv->cur_rx[q];
589 struct net_device_stats *stats = &priv->stats[q];
590 struct ravb_ex_rx_desc *desc;
591 struct sk_buff *skb;
592 dma_addr_t dma_addr;
593 struct timespec64 ts;
594 u8 desc_status;
595 u16 pkt_len;
596 int limit;
597
598 boguscnt = min(boguscnt, *quota);
599 limit = boguscnt;
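	/* Walk the RX ring until an empty descriptor is found or the
	 * NAPI budget is exhausted.
	 */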
600 desc = &priv->rx_ring[q][entry];
601 while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
603 dma_rmb();
604 desc_status = desc->msc;
605 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
606
607 if (--boguscnt < 0)
608 break;
		/* We use 0-byte descriptors to mark the DMA mapping errors */
611 if (!pkt_len)
612 continue;
613
614 if (desc_status & MSC_MC)
615 stats->multicast++;
616
617 if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
618 MSC_CEEF)) {
619 stats->rx_errors++;
620 if (desc_status & MSC_CRC)
621 stats->rx_crc_errors++;
622 if (desc_status & MSC_RFE)
623 stats->rx_frame_errors++;
624 if (desc_status & (MSC_RTLF | MSC_RTSF))
625 stats->rx_length_errors++;
626 if (desc_status & MSC_CEEF)
627 stats->rx_missed_errors++;
628 } else {
629 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
630
631 skb = priv->rx_skb[q][entry];
632 priv->rx_skb[q][entry] = NULL;
633 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
634 RX_BUF_SZ,
635 DMA_FROM_DEVICE);
636 get_ts &= (q == RAVB_NC) ?
637 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
638 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
639 if (get_ts) {
640 struct skb_shared_hwtstamps *shhwtstamps;
641
642 shhwtstamps = skb_hwtstamps(skb);
643 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
644 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
645 32) | le32_to_cpu(desc->ts_sl);
646 ts.tv_nsec = le32_to_cpu(desc->ts_n);
647 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
648 }
649
650 skb_put(skb, pkt_len);
651 skb->protocol = eth_type_trans(skb, ndev);
652 if (ndev->features & NETIF_F_RXCSUM)
653 ravb_rx_csum(skb);
654 napi_gro_receive(&priv->napi[q], skb);
655 stats->rx_packets++;
656 stats->rx_bytes += pkt_len;
657 }
658
659 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
660 desc = &priv->rx_ring[q][entry];
661 }
	/* Refill the RX ring buffers */
664 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
665 entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
666 desc = &priv->rx_ring[q][entry];
667 desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
668
669 if (!priv->rx_skb[q][entry]) {
670 skb = netdev_alloc_skb(ndev, info->max_rx_len);
671 if (!skb)
672 break;
673 ravb_set_buffer_align(skb);
674 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
675 le16_to_cpu(desc->ds_cc),
676 DMA_FROM_DEVICE);
677 skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
681 if (dma_mapping_error(ndev->dev.parent, dma_addr))
682 desc->ds_cc = cpu_to_le16(0);
683 desc->dptr = cpu_to_le32(dma_addr);
684 priv->rx_skb[q][entry] = skb;
685 }
		/* Descriptor type must be set after all the above writes */
687 dma_wmb();
688 desc->die_dt = DT_FEMPTY;
689 }
690
691 *quota -= limit - (++boguscnt);
692
693 return boguscnt <= 0;
694}
/* Packet receive function for Ethernet AVB */
697static bool ravb_rx(struct net_device *ndev, int *quota, int q)
698{
699 struct ravb_private *priv = netdev_priv(ndev);
700 const struct ravb_hw_info *info = priv->info;
701
702 return info->receive(ndev, quota, q);
703}
704
705static void ravb_rcv_snd_disable(struct net_device *ndev)
706{
	/* Disable TX and RX */
708 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
709}
710
711static void ravb_rcv_snd_enable(struct net_device *ndev)
712{
	/* Enable TX and RX */
714 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
715}
/* Wait for the TX/RX DMA to stop, disable the E-MAC and return to CONFIG mode */
718static int ravb_stop_dma(struct net_device *ndev)
719{
720 int error;
	/* Wait for stopping the hardware TX process */
723 error = ravb_wait(ndev, TCCR,
724 TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
725 if (error)
726 return error;
727
728 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
729 0);
730 if (error)
731 return error;
	/* Stop the E-MAC's RX/TX processes */
734 ravb_rcv_snd_disable(ndev);
	/* Wait for stopping the RX DMA process */
737 error = ravb_wait(ndev, CSR, CSR_RPO, 0);
738 if (error)
739 return error;
	/* Stop AVB-DMAC process */
742 return ravb_config(ndev);
743}
/* E-MAC interrupt handler */
746static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
747{
748 struct ravb_private *priv = netdev_priv(ndev);
749 u32 ecsr, psr;
750
751 ecsr = ravb_read(ndev, ECSR);
752 ravb_write(ndev, ecsr, ECSR);
753
754 if (ecsr & ECSR_MPD)
755 pm_wakeup_event(&priv->pdev->dev, 0);
756 if (ecsr & ECSR_ICD)
757 ndev->stats.tx_carrier_errors++;
758 if (ecsr & ECSR_LCHNG) {
		/* Link changed */
760 if (priv->no_avb_link)
761 return;
762 psr = ravb_read(ndev, PSR);
763 if (priv->avb_link_active_low)
764 psr ^= PSR_LMON;
765 if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
767 ravb_rcv_snd_disable(ndev);
768 } else {
			/* Enable RX and TX */
770 ravb_rcv_snd_enable(ndev);
771 }
772 }
773}
774
775static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
776{
777 struct net_device *ndev = dev_id;
778 struct ravb_private *priv = netdev_priv(ndev);
779
780 spin_lock(&priv->lock);
781 ravb_emac_interrupt_unlocked(ndev);
782 spin_unlock(&priv->lock);
783 return IRQ_HANDLED;
784}
/* Error interrupt handler */
787static void ravb_error_interrupt(struct net_device *ndev)
788{
789 struct ravb_private *priv = netdev_priv(ndev);
790 u32 eis, ris2;
791
792 eis = ravb_read(ndev, EIS);
793 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
794 if (eis & EIS_QFS) {
795 ris2 = ravb_read(ndev, RIS2);
796 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
797 RIS2);
		/* Receive Descriptor Empty int */
800 if (ris2 & RIS2_QFF0)
801 priv->stats[RAVB_BE].rx_over_errors++;
		/* Receive Descriptor Empty int */
804 if (ris2 & RIS2_QFF1)
805 priv->stats[RAVB_NC].rx_over_errors++;
		/* Receive FIFO Overflow int */
808 if (ris2 & RIS2_RFFF)
809 priv->rx_fifo_errors++;
810 }
811}
812
813static bool ravb_queue_interrupt(struct net_device *ndev, int q)
814{
815 struct ravb_private *priv = netdev_priv(ndev);
816 const struct ravb_hw_info *info = priv->info;
817 u32 ris0 = ravb_read(ndev, RIS0);
818 u32 ric0 = ravb_read(ndev, RIC0);
819 u32 tis = ravb_read(ndev, TIS);
820 u32 tic = ravb_read(ndev, TIC);
821
822 if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
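		/* Only act when the interrupt is both pending (RIS0/TIS) and
		 * enabled (RIC0/TIC) for this queue.
		 */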
823 if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
825 if (!info->multi_irqs) {
826 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
827 ravb_write(ndev, tic & ~BIT(q), TIC);
828 } else {
829 ravb_write(ndev, BIT(q), RID0);
830 ravb_write(ndev, BIT(q), TID);
831 }
832 __napi_schedule(&priv->napi[q]);
833 } else {
834 netdev_warn(ndev,
835 "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
836 ris0, ric0);
837 netdev_warn(ndev,
838 " tx status 0x%08x, tx mask 0x%08x.\n",
839 tis, tic);
840 }
841 return true;
842 }
843 return false;
844}
845
846static bool ravb_timestamp_interrupt(struct net_device *ndev)
847{
848 u32 tis = ravb_read(ndev, TIS);
849
850 if (tis & TIS_TFUF) {
851 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
852 ravb_get_tx_tstamp(ndev);
853 return true;
854 }
855 return false;
856}
857
858static irqreturn_t ravb_interrupt(int irq, void *dev_id)
859{
860 struct net_device *ndev = dev_id;
861 struct ravb_private *priv = netdev_priv(ndev);
862 irqreturn_t result = IRQ_NONE;
863 u32 iss;
864
865 spin_lock(&priv->lock);
	/* Get interrupt status */
867 iss = ravb_read(ndev, ISS);
	/* Received and transmitted interrupts */
870 if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
871 int q;
		/* Timestamp updated */
874 if (ravb_timestamp_interrupt(ndev))
875 result = IRQ_HANDLED;
		/* Network control and best effort queue RX/TX */
878 for (q = RAVB_NC; q >= RAVB_BE; q--) {
879 if (ravb_queue_interrupt(ndev, q))
880 result = IRQ_HANDLED;
881 }
882 }
	/* E-MAC status summary */
885 if (iss & ISS_MS) {
886 ravb_emac_interrupt_unlocked(ndev);
887 result = IRQ_HANDLED;
888 }
	/* Error status summary */
891 if (iss & ISS_ES) {
892 ravb_error_interrupt(ndev);
893 result = IRQ_HANDLED;
894 }
	/* gPTP interrupt status summary */
897 if (iss & ISS_CGIS) {
898 ravb_ptp_interrupt(ndev);
899 result = IRQ_HANDLED;
900 }
901
902 spin_unlock(&priv->lock);
903 return result;
904}
/* Timestamp/Error/gPTP interrupt handler */
907static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
908{
909 struct net_device *ndev = dev_id;
910 struct ravb_private *priv = netdev_priv(ndev);
911 irqreturn_t result = IRQ_NONE;
912 u32 iss;
913
914 spin_lock(&priv->lock);
	/* Get interrupt status */
916 iss = ravb_read(ndev, ISS);
	/* Timestamp updated */
919 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
920 result = IRQ_HANDLED;
	/* Error status summary */
923 if (iss & ISS_ES) {
924 ravb_error_interrupt(ndev);
925 result = IRQ_HANDLED;
926 }
	/* gPTP interrupt status summary */
929 if (iss & ISS_CGIS) {
930 ravb_ptp_interrupt(ndev);
931 result = IRQ_HANDLED;
932 }
933
934 spin_unlock(&priv->lock);
935 return result;
936}
937
938static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
939{
940 struct net_device *ndev = dev_id;
941 struct ravb_private *priv = netdev_priv(ndev);
942 irqreturn_t result = IRQ_NONE;
943
944 spin_lock(&priv->lock);
	/* Network control/Best effort queue RX/TX */
947 if (ravb_queue_interrupt(ndev, q))
948 result = IRQ_HANDLED;
949
950 spin_unlock(&priv->lock);
951 return result;
952}
953
954static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
955{
956 return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
957}
958
959static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
960{
961 return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
962}
963
964static int ravb_poll(struct napi_struct *napi, int budget)
965{
966 struct net_device *ndev = napi->dev;
967 struct ravb_private *priv = netdev_priv(ndev);
968 const struct ravb_hw_info *info = priv->info;
969 unsigned long flags;
970 int q = napi - priv->napi;
971 int mask = BIT(q);
972 int quota = budget;
	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
976 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
977 if (ravb_rx(ndev, "a, q))
978 goto out;
	/* Processing TX Descriptor Ring */
981 spin_lock_irqsave(&priv->lock, flags);
	/* Clear TX interrupt */
983 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
984 ravb_tx_free(ndev, q, true);
985 netif_wake_subqueue(ndev, q);
986 spin_unlock_irqrestore(&priv->lock, flags);
987
988 napi_complete(napi);
	/* Re-enable RX/TX interrupts */
991 spin_lock_irqsave(&priv->lock, flags);
992 if (!info->multi_irqs) {
993 ravb_modify(ndev, RIC0, mask, mask);
994 ravb_modify(ndev, TIC, mask, mask);
995 } else {
996 ravb_write(ndev, mask, RIE0);
997 ravb_write(ndev, mask, TIE);
998 }
999 spin_unlock_irqrestore(&priv->lock, flags);
	/* Receive error message handling */
1002 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
1003 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
1004 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
1005 ndev->stats.rx_over_errors = priv->rx_over_errors;
1006 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
1007 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
1008out:
1009 return budget - quota;
1010}
/* PHY state control function */
1013static void ravb_adjust_link(struct net_device *ndev)
1014{
1015 struct ravb_private *priv = netdev_priv(ndev);
1016 const struct ravb_hw_info *info = priv->info;
1017 struct phy_device *phydev = ndev->phydev;
1018 bool new_state = false;
1019 unsigned long flags;
1020
1021 spin_lock_irqsave(&priv->lock, flags);
	/* Disable TX and RX right over here, if E-MAC change is ignored */
1024 if (priv->no_avb_link)
1025 ravb_rcv_snd_disable(ndev);
1026
1027 if (phydev->link) {
1028 if (phydev->speed != priv->speed) {
1029 new_state = true;
1030 priv->speed = phydev->speed;
1031 info->set_rate(ndev);
1032 }
1033 if (!priv->link) {
1034 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1035 new_state = true;
1036 priv->link = phydev->link;
1037 }
1038 } else if (priv->link) {
1039 new_state = true;
1040 priv->link = 0;
1041 priv->speed = 0;
1042 }
	/* Enable TX and RX right over here, if E-MAC change is ignored */
1045 if (priv->no_avb_link && phydev->link)
1046 ravb_rcv_snd_enable(ndev);
1047
1048 spin_unlock_irqrestore(&priv->lock, flags);
1049
1050 if (new_state && netif_msg_link(priv))
1051 phy_print_status(phydev);
1052}
1053
1054static const struct soc_device_attribute r8a7795es10[] = {
1055 { .soc_id = "r8a7795", .revision = "ES1.0", },
1056 { }
1057};
/* PHY init function */
1060static int ravb_phy_init(struct net_device *ndev)
1061{
1062 struct device_node *np = ndev->dev.parent->of_node;
1063 struct ravb_private *priv = netdev_priv(ndev);
1064 struct phy_device *phydev;
1065 struct device_node *pn;
1066 phy_interface_t iface;
1067 int err;
1068
1069 priv->link = 0;
1070 priv->speed = 0;
1071
1072
1073 pn = of_parse_phandle(np, "phy-handle", 0);
1074 if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
1078 if (of_phy_is_fixed_link(np)) {
1079 err = of_phy_register_fixed_link(np);
1080 if (err)
1081 return err;
1082 }
1083 pn = of_node_get(np);
1084 }
1085
1086 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1087 : priv->phy_interface;
1088 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1089 of_node_put(pn);
1090 if (!phydev) {
1091 netdev_err(ndev, "failed to connect PHY\n");
1092 err = -ENOENT;
1093 goto err_deregister_fixed_link;
1094 }
	/* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
	 * at this time.
	 */
1099 if (soc_device_match(r8a7795es10)) {
1100 err = phy_set_max_speed(phydev, SPEED_100);
1101 if (err) {
1102 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1103 goto err_phy_disconnect;
1104 }
1105
1106 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1107 }
	/* 10BASE, Pause and Asym Pause are not supported */
1110 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1111 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1112 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1113 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
	/* Half Duplex is not supported */
1116 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1117 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1118
1119 phy_attached_info(phydev);
1120
1121 return 0;
1122
1123err_phy_disconnect:
1124 phy_disconnect(phydev);
1125err_deregister_fixed_link:
1126 if (of_phy_is_fixed_link(np))
1127 of_phy_deregister_fixed_link(np);
1128
1129 return err;
1130}
/* PHY control start function */
1133static int ravb_phy_start(struct net_device *ndev)
1134{
1135 int error;
1136
1137 error = ravb_phy_init(ndev);
1138 if (error)
1139 return error;
1140
1141 phy_start(ndev->phydev);
1142
1143 return 0;
1144}
1145
1146static u32 ravb_get_msglevel(struct net_device *ndev)
1147{
1148 struct ravb_private *priv = netdev_priv(ndev);
1149
1150 return priv->msg_enable;
1151}
1152
1153static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1154{
1155 struct ravb_private *priv = netdev_priv(ndev);
1156
1157 priv->msg_enable = value;
1158}
1159
1160static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1161 "rx_queue_0_current",
1162 "tx_queue_0_current",
1163 "rx_queue_0_dirty",
1164 "tx_queue_0_dirty",
1165 "rx_queue_0_packets",
1166 "tx_queue_0_packets",
1167 "rx_queue_0_bytes",
1168 "tx_queue_0_bytes",
1169 "rx_queue_0_mcast_packets",
1170 "rx_queue_0_errors",
1171 "rx_queue_0_crc_errors",
1172 "rx_queue_0_frame_errors",
1173 "rx_queue_0_length_errors",
1174 "rx_queue_0_missed_errors",
1175 "rx_queue_0_over_errors",
1176
1177 "rx_queue_1_current",
1178 "tx_queue_1_current",
1179 "rx_queue_1_dirty",
1180 "tx_queue_1_dirty",
1181 "rx_queue_1_packets",
1182 "tx_queue_1_packets",
1183 "rx_queue_1_bytes",
1184 "tx_queue_1_bytes",
1185 "rx_queue_1_mcast_packets",
1186 "rx_queue_1_errors",
1187 "rx_queue_1_crc_errors",
1188 "rx_queue_1_frame_errors",
1189 "rx_queue_1_length_errors",
1190 "rx_queue_1_missed_errors",
1191 "rx_queue_1_over_errors",
1192};
1193
1194static int ravb_get_sset_count(struct net_device *netdev, int sset)
1195{
1196 struct ravb_private *priv = netdev_priv(netdev);
1197 const struct ravb_hw_info *info = priv->info;
1198
1199 switch (sset) {
1200 case ETH_SS_STATS:
1201 return info->stats_len;
1202 default:
1203 return -EOPNOTSUPP;
1204 }
1205}
1206
1207static void ravb_get_ethtool_stats(struct net_device *ndev,
1208 struct ethtool_stats *estats, u64 *data)
1209{
1210 struct ravb_private *priv = netdev_priv(ndev);
1211 int i = 0;
1212 int q;
	/* Device-specific stats */
1215 for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
1216 struct net_device_stats *stats = &priv->stats[q];
1217
1218 data[i++] = priv->cur_rx[q];
1219 data[i++] = priv->cur_tx[q];
1220 data[i++] = priv->dirty_rx[q];
1221 data[i++] = priv->dirty_tx[q];
1222 data[i++] = stats->rx_packets;
1223 data[i++] = stats->tx_packets;
1224 data[i++] = stats->rx_bytes;
1225 data[i++] = stats->tx_bytes;
1226 data[i++] = stats->multicast;
1227 data[i++] = stats->rx_errors;
1228 data[i++] = stats->rx_crc_errors;
1229 data[i++] = stats->rx_frame_errors;
1230 data[i++] = stats->rx_length_errors;
1231 data[i++] = stats->rx_missed_errors;
1232 data[i++] = stats->rx_over_errors;
1233 }
1234}
1235
1236static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1237{
1238 struct ravb_private *priv = netdev_priv(ndev);
1239 const struct ravb_hw_info *info = priv->info;
1240
1241 switch (stringset) {
1242 case ETH_SS_STATS:
1243 memcpy(data, info->gstrings_stats, info->gstrings_size);
1244 break;
1245 }
1246}
1247
1248static void ravb_get_ringparam(struct net_device *ndev,
1249 struct ethtool_ringparam *ring)
1250{
1251 struct ravb_private *priv = netdev_priv(ndev);
1252
1253 ring->rx_max_pending = BE_RX_RING_MAX;
1254 ring->tx_max_pending = BE_TX_RING_MAX;
1255 ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1256 ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1257}
1258
1259static int ravb_set_ringparam(struct net_device *ndev,
1260 struct ethtool_ringparam *ring)
1261{
1262 struct ravb_private *priv = netdev_priv(ndev);
1263 const struct ravb_hw_info *info = priv->info;
1264 int error;
1265
1266 if (ring->tx_pending > BE_TX_RING_MAX ||
1267 ring->rx_pending > BE_RX_RING_MAX ||
1268 ring->tx_pending < BE_TX_RING_MIN ||
1269 ring->rx_pending < BE_RX_RING_MIN)
1270 return -EINVAL;
1271 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1272 return -EINVAL;
1273
1274 if (netif_running(ndev)) {
1275 netif_device_detach(ndev);
		/* Stop PTP Clock driver */
1277 if (info->no_ptp_cfg_active)
1278 ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
1280 error = ravb_stop_dma(ndev);
1281 if (error) {
1282 netdev_err(ndev,
1283 "cannot set ringparam! Any AVB processes are still running?\n");
1284 return error;
1285 }
1286 synchronize_irq(ndev->irq);
		/* Free all the skb's in the RX queue and the DMA buffers. */
1289 ravb_ring_free(ndev, RAVB_BE);
1290 ravb_ring_free(ndev, RAVB_NC);
1291 }
	/* Set new parameters */
1294 priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1295 priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1296
1297 if (netif_running(ndev)) {
1298 error = ravb_dmac_init(ndev);
1299 if (error) {
1300 netdev_err(ndev,
1301 "%s: ravb_dmac_init() failed, error %d\n",
1302 __func__, error);
1303 return error;
1304 }
1305
1306 ravb_emac_init(ndev);
		/* Initialise PTP Clock driver */
1309 if (info->no_ptp_cfg_active)
1310 ravb_ptp_init(ndev, priv->pdev);
1311
1312 netif_device_attach(ndev);
1313 }
1314
1315 return 0;
1316}
1317
1318static int ravb_get_ts_info(struct net_device *ndev,
1319 struct ethtool_ts_info *info)
1320{
1321 struct ravb_private *priv = netdev_priv(ndev);
1322
1323 info->so_timestamping =
1324 SOF_TIMESTAMPING_TX_SOFTWARE |
1325 SOF_TIMESTAMPING_RX_SOFTWARE |
1326 SOF_TIMESTAMPING_SOFTWARE |
1327 SOF_TIMESTAMPING_TX_HARDWARE |
1328 SOF_TIMESTAMPING_RX_HARDWARE |
1329 SOF_TIMESTAMPING_RAW_HARDWARE;
1330 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1331 info->rx_filters =
1332 (1 << HWTSTAMP_FILTER_NONE) |
1333 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1334 (1 << HWTSTAMP_FILTER_ALL);
1335 info->phc_index = ptp_clock_index(priv->ptp.clock);
1336
1337 return 0;
1338}
1339
1340static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1341{
1342 struct ravb_private *priv = netdev_priv(ndev);
1343
1344 wol->supported = WAKE_MAGIC;
1345 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1346}
1347
1348static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1349{
1350 struct ravb_private *priv = netdev_priv(ndev);
1351
1352 if (wol->wolopts & ~WAKE_MAGIC)
1353 return -EOPNOTSUPP;
1354
1355 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1356
1357 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1358
1359 return 0;
1360}
1361
1362static const struct ethtool_ops ravb_ethtool_ops = {
1363 .nway_reset = phy_ethtool_nway_reset,
1364 .get_msglevel = ravb_get_msglevel,
1365 .set_msglevel = ravb_set_msglevel,
1366 .get_link = ethtool_op_get_link,
1367 .get_strings = ravb_get_strings,
1368 .get_ethtool_stats = ravb_get_ethtool_stats,
1369 .get_sset_count = ravb_get_sset_count,
1370 .get_ringparam = ravb_get_ringparam,
1371 .set_ringparam = ravb_set_ringparam,
1372 .get_ts_info = ravb_get_ts_info,
1373 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1374 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1375 .get_wol = ravb_get_wol,
1376 .set_wol = ravb_set_wol,
1377};
1378
1379static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1380 struct net_device *ndev, struct device *dev,
1381 const char *ch)
1382{
1383 char *name;
1384 int error;
1385
1386 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1387 if (!name)
1388 return -ENOMEM;
1389 error = request_irq(irq, handler, 0, name, ndev);
1390 if (error)
1391 netdev_err(ndev, "cannot request IRQ %s\n", name);
1392
1393 return error;
1394}
/* Network device open function for Ethernet AVB */
1397static int ravb_open(struct net_device *ndev)
1398{
1399 struct ravb_private *priv = netdev_priv(ndev);
1400 const struct ravb_hw_info *info = priv->info;
1401 struct platform_device *pdev = priv->pdev;
1402 struct device *dev = &pdev->dev;
1403 int error;
1404
1405 napi_enable(&priv->napi[RAVB_BE]);
1406 napi_enable(&priv->napi[RAVB_NC]);
1407
1408 if (!info->multi_irqs) {
1409 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1410 ndev->name, ndev);
1411 if (error) {
1412 netdev_err(ndev, "cannot request IRQ\n");
1413 goto out_napi_off;
1414 }
1415 } else {
1416 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1417 dev, "ch22:multi");
1418 if (error)
1419 goto out_napi_off;
1420 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1421 dev, "ch24:emac");
1422 if (error)
1423 goto out_free_irq;
1424 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1425 ndev, dev, "ch0:rx_be");
1426 if (error)
1427 goto out_free_irq_emac;
1428 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1429 ndev, dev, "ch18:tx_be");
1430 if (error)
1431 goto out_free_irq_be_rx;
1432 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1433 ndev, dev, "ch1:rx_nc");
1434 if (error)
1435 goto out_free_irq_be_tx;
1436 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1437 ndev, dev, "ch19:tx_nc");
1438 if (error)
1439 goto out_free_irq_nc_rx;
1440 }
	/* Device init */
1443 error = ravb_dmac_init(ndev);
1444 if (error)
1445 goto out_free_irq_nc_tx;
1446 ravb_emac_init(ndev);
	/* Initialise PTP Clock driver */
1449 if (info->no_ptp_cfg_active)
1450 ravb_ptp_init(ndev, priv->pdev);
1451
1452 netif_tx_start_all_queues(ndev);
	/* PHY control start */
1455 error = ravb_phy_start(ndev);
1456 if (error)
1457 goto out_ptp_stop;
1458
1459 return 0;
1460
1461out_ptp_stop:
	/* Stop PTP Clock driver */
1463 if (info->no_ptp_cfg_active)
1464 ravb_ptp_stop(ndev);
1465out_free_irq_nc_tx:
1466 if (!info->multi_irqs)
1467 goto out_free_irq;
1468 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1469out_free_irq_nc_rx:
1470 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1471out_free_irq_be_tx:
1472 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1473out_free_irq_be_rx:
1474 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1475out_free_irq_emac:
1476 free_irq(priv->emac_irq, ndev);
1477out_free_irq:
1478 free_irq(ndev->irq, ndev);
1479out_napi_off:
1480 napi_disable(&priv->napi[RAVB_NC]);
1481 napi_disable(&priv->napi[RAVB_BE]);
1482 return error;
1483}
/* Timeout function for Ethernet AVB */
1486static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1487{
1488 struct ravb_private *priv = netdev_priv(ndev);
1489
1490 netif_err(priv, tx_err, ndev,
1491 "transmit timed out, status %08x, resetting...\n",
1492 ravb_read(ndev, ISS));
	/* tx_errors count up */
1495 ndev->stats.tx_errors++;
1496
1497 schedule_work(&priv->work);
1498}
1499
1500static void ravb_tx_timeout_work(struct work_struct *work)
1501{
1502 struct ravb_private *priv = container_of(work, struct ravb_private,
1503 work);
1504 const struct ravb_hw_info *info = priv->info;
1505 struct net_device *ndev = priv->ndev;
1506 int error;
1507
1508 netif_tx_stop_all_queues(ndev);
	/* Stop PTP Clock driver */
1511 if (info->no_ptp_cfg_active)
1512 ravb_ptp_stop(ndev);
	/* Wait for DMA stopping */
1515 if (ravb_stop_dma(ndev)) {
		/* If ravb_stop_dma() fails, the hardware is still operating
		 * for TX and/or RX. So, this should not call the following
		 * functions because ravb_dmac_init() is possible to fail too.
		 * Also, this should not retry ravb_stop_dma() again and again
		 * here because it's possible to wait forever. So, this just
		 * re-enables the TX and RX and skips the following
		 * re-initialization procedure.
		 */
1524 ravb_rcv_snd_enable(ndev);
1525 goto out;
1526 }
1527
1528 ravb_ring_free(ndev, RAVB_BE);
1529 ravb_ring_free(ndev, RAVB_NC);
	/* Device init */
1532 error = ravb_dmac_init(ndev);
1533 if (error) {
		/* If ravb_dmac_init() fails, the descriptors have been freed,
		 * so return here to avoid re-enabling the TX and RX in
		 * ravb_emac_init().
		 */
1538 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1539 __func__, error);
1540 return;
1541 }
1542 ravb_emac_init(ndev);
1543
1544out:
	/* Initialise PTP Clock driver */
1546 if (info->no_ptp_cfg_active)
1547 ravb_ptp_init(ndev, priv->pdev);
1548
1549 netif_tx_start_all_queues(ndev);
1550}
/* Packet transmit function for Ethernet AVB */
1553static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1554{
1555 struct ravb_private *priv = netdev_priv(ndev);
1556 unsigned int num_tx_desc = priv->num_tx_desc;
1557 u16 q = skb_get_queue_mapping(skb);
1558 struct ravb_tstamp_skb *ts_skb;
1559 struct ravb_tx_desc *desc;
1560 unsigned long flags;
1561 u32 dma_addr;
1562 void *buffer;
1563 u32 entry;
1564 u32 len;
1565
1566 spin_lock_irqsave(&priv->lock, flags);
1567 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1568 num_tx_desc) {
1569 netif_err(priv, tx_queued, ndev,
1570 "still transmitting with the full ring!\n");
1571 netif_stop_subqueue(ndev, q);
1572 spin_unlock_irqrestore(&priv->lock, flags);
1573 return NETDEV_TX_BUSY;
1574 }
1575
1576 if (skb_put_padto(skb, ETH_ZLEN))
1577 goto exit;
1578
1579 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
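	/* Each packet uses num_tx_desc ring entries; tx_skb[] is indexed
	 * per packet, not per descriptor.
	 */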
1580 priv->tx_skb[q][entry / num_tx_desc] = skb;
1581
1582 if (num_tx_desc > 1) {
1583 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1584 entry / num_tx_desc * DPTR_ALIGN;
1585 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
		/* Zero length DMA descriptors are problematic as they seem
		 * to terminate DMA transfers. Avoid them by simply using a
		 * length of DPTR_ALIGN (4) when skb data is aligned to
		 * DPTR_ALIGN.
		 *
		 * As skb is guaranteed to have at least ETH_ZLEN (60)
		 * bytes of data by the call to skb_put_padto() above this
		 * is safe with respect to both the length of the first DMA
		 * descriptor (len) overflowing the available data and the
		 * length of the second DMA descriptor (skb->len - len)
		 * being negative.
		 */
1599 if (len == 0)
1600 len = DPTR_ALIGN;
1601
1602 memcpy(buffer, skb->data, len);
1603 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1604 DMA_TO_DEVICE);
1605 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1606 goto drop;
1607
1608 desc = &priv->tx_ring[q][entry];
1609 desc->ds_tagl = cpu_to_le16(len);
1610 desc->dptr = cpu_to_le32(dma_addr);
1611
1612 buffer = skb->data + len;
1613 len = skb->len - len;
1614 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1615 DMA_TO_DEVICE);
1616 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1617 goto unmap;
1618
1619 desc++;
1620 } else {
1621 desc = &priv->tx_ring[q][entry];
1622 len = skb->len;
1623 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1624 DMA_TO_DEVICE);
1625 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1626 goto drop;
1627 }
1628 desc->ds_tagl = cpu_to_le16(len);
1629 desc->dptr = cpu_to_le32(dma_addr);
	/* TX timestamp required */
1632 if (q == RAVB_NC) {
1633 ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
1634 if (!ts_skb) {
1635 if (num_tx_desc > 1) {
1636 desc--;
1637 dma_unmap_single(ndev->dev.parent, dma_addr,
1638 len, DMA_TO_DEVICE);
1639 }
1640 goto unmap;
1641 }
1642 ts_skb->skb = skb_get(skb);
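		/* The hardware echoes this 10-bit tag back in the timestamp
		 * FIFO, which is how the timestamp is matched to the skb.
		 */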
1643 ts_skb->tag = priv->ts_skb_tag++;
1644 priv->ts_skb_tag &= 0x3ff;
1645 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
		/* TAG and timestamp required flag */
1648 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1649 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1650 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
1651 }
1652
1653 skb_tx_timestamp(skb);
	/* Descriptor type must be set after all the above writes */
1655 dma_wmb();
1656 if (num_tx_desc > 1) {
1657 desc->die_dt = DT_FEND;
1658 desc--;
1659 desc->die_dt = DT_FSTART;
1660 } else {
1661 desc->die_dt = DT_FSINGLE;
1662 }
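	/* Request transmission start for this queue */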
1663 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
1664
1665 priv->cur_tx[q] += num_tx_desc;
1666 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1667 (priv->num_tx_ring[q] - 1) * num_tx_desc &&
1668 !ravb_tx_free(ndev, q, true))
1669 netif_stop_subqueue(ndev, q);
1670
1671exit:
1672 spin_unlock_irqrestore(&priv->lock, flags);
1673 return NETDEV_TX_OK;
1674
1675unmap:
1676 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
1677 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
1678drop:
1679 dev_kfree_skb_any(skb);
1680 priv->tx_skb[q][entry / num_tx_desc] = NULL;
1681 goto exit;
1682}
1683
1684static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
1685 struct net_device *sb_dev)
1686{
	/* If skb needs TX timestamp, it is handled in network control queue */
1688 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
1689 RAVB_BE;
1690
1691}
1692
1693static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
1694{
1695 struct ravb_private *priv = netdev_priv(ndev);
1696 const struct ravb_hw_info *info = priv->info;
1697 struct net_device_stats *nstats, *stats0, *stats1;
1698
1699 nstats = &ndev->stats;
1700 stats0 = &priv->stats[RAVB_BE];
1701 stats1 = &priv->stats[RAVB_NC];
1702
1703 if (info->tx_counters) {
1704 nstats->tx_dropped += ravb_read(ndev, TROCR);
1705 ravb_write(ndev, 0, TROCR);
1706 }
1707
1708 nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
1709 nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
1710 nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
1711 nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
1712 nstats->multicast = stats0->multicast + stats1->multicast;
1713 nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
1714 nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
1715 nstats->rx_frame_errors =
1716 stats0->rx_frame_errors + stats1->rx_frame_errors;
1717 nstats->rx_length_errors =
1718 stats0->rx_length_errors + stats1->rx_length_errors;
1719 nstats->rx_missed_errors =
1720 stats0->rx_missed_errors + stats1->rx_missed_errors;
1721 nstats->rx_over_errors =
1722 stats0->rx_over_errors + stats1->rx_over_errors;
1723
1724 return nstats;
1725}
/* Update promiscuous mode */
1728static void ravb_set_rx_mode(struct net_device *ndev)
1729{
1730 struct ravb_private *priv = netdev_priv(ndev);
1731 unsigned long flags;
1732
1733 spin_lock_irqsave(&priv->lock, flags);
1734 ravb_modify(ndev, ECMR, ECMR_PRM,
1735 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
1736 spin_unlock_irqrestore(&priv->lock, flags);
1737}
/* Device close function for Ethernet AVB */
1740static int ravb_close(struct net_device *ndev)
1741{
1742 struct device_node *np = ndev->dev.parent->of_node;
1743 struct ravb_private *priv = netdev_priv(ndev);
1744 const struct ravb_hw_info *info = priv->info;
1745 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
1746
1747 netif_tx_stop_all_queues(ndev);
	/* Disable interrupts by clearing the interrupt masks */
1750 ravb_write(ndev, 0, RIC0);
1751 ravb_write(ndev, 0, RIC2);
1752 ravb_write(ndev, 0, TIC);
	/* Stop PTP Clock driver */
1755 if (info->no_ptp_cfg_active)
1756 ravb_ptp_stop(ndev);
	/* Set the config mode to stop the AVB-DMAC's processes */
1759 if (ravb_stop_dma(ndev) < 0)
1760 netdev_err(ndev,
1761 "device will be stopped after h/w processes are done.\n");
	/* Clear the timestamp list */
1764 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1765 list_del(&ts_skb->list);
1766 kfree_skb(ts_skb->skb);
1767 kfree(ts_skb);
1768 }
	/* PHY disconnect */
1771 if (ndev->phydev) {
1772 phy_stop(ndev->phydev);
1773 phy_disconnect(ndev->phydev);
1774 if (of_phy_is_fixed_link(np))
1775 of_phy_deregister_fixed_link(np);
1776 }
1777
1778 if (info->multi_irqs) {
1779 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1780 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1781 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1782 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1783 free_irq(priv->emac_irq, ndev);
1784 }
1785 free_irq(ndev->irq, ndev);
1786
1787 napi_disable(&priv->napi[RAVB_NC]);
1788 napi_disable(&priv->napi[RAVB_BE]);
	/* Free all the skb's in the RX queue and the DMA buffers. */
1791 ravb_ring_free(ndev, RAVB_BE);
1792 ravb_ring_free(ndev, RAVB_NC);
1793
1794 return 0;
1795}
1796
1797static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1798{
1799 struct ravb_private *priv = netdev_priv(ndev);
1800 struct hwtstamp_config config;
1801
1802 config.flags = 0;
1803 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1804 HWTSTAMP_TX_OFF;
1805 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
1806 case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
1807 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1808 break;
1809 case RAVB_RXTSTAMP_TYPE_ALL:
1810 config.rx_filter = HWTSTAMP_FILTER_ALL;
1811 break;
1812 default:
1813 config.rx_filter = HWTSTAMP_FILTER_NONE;
1814 }
1815
1816 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1817 -EFAULT : 0;
1818}
/* Control hardware time stamping */
1821static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1822{
1823 struct ravb_private *priv = netdev_priv(ndev);
1824 struct hwtstamp_config config;
1825 u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
1826 u32 tstamp_tx_ctrl;
1827
1828 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1829 return -EFAULT;
	/* Reserved for future extensions */
1832 if (config.flags)
1833 return -EINVAL;
1834
1835 switch (config.tx_type) {
1836 case HWTSTAMP_TX_OFF:
1837 tstamp_tx_ctrl = 0;
1838 break;
1839 case HWTSTAMP_TX_ON:
1840 tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
1841 break;
1842 default:
1843 return -ERANGE;
1844 }
1845
1846 switch (config.rx_filter) {
1847 case HWTSTAMP_FILTER_NONE:
1848 tstamp_rx_ctrl = 0;
1849 break;
1850 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1851 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
1852 break;
1853 default:
1854 config.rx_filter = HWTSTAMP_FILTER_ALL;
1855 tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
1856 }
1857
1858 priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1859 priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1860
1861 return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1862 -EFAULT : 0;
1863}
/* ioctl to device function */
1866static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1867{
1868 struct phy_device *phydev = ndev->phydev;
1869
1870 if (!netif_running(ndev))
1871 return -EINVAL;
1872
1873 if (!phydev)
1874 return -ENODEV;
1875
1876 switch (cmd) {
1877 case SIOCGHWTSTAMP:
1878 return ravb_hwtstamp_get(ndev, req);
1879 case SIOCSHWTSTAMP:
1880 return ravb_hwtstamp_set(ndev, req);
1881 }
1882
1883 return phy_mii_ioctl(phydev, req, cmd);
1884}
1885
1886static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
1887{
1888 struct ravb_private *priv = netdev_priv(ndev);
1889
1890 ndev->mtu = new_mtu;
1891
1892 if (netif_running(ndev)) {
1893 synchronize_irq(priv->emac_irq);
1894 ravb_emac_init(ndev);
1895 }
1896
1897 netdev_update_features(ndev);
1898
1899 return 0;
1900}
1901
1902static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
1903{
1904 struct ravb_private *priv = netdev_priv(ndev);
1905 unsigned long flags;
1906
1907 spin_lock_irqsave(&priv->lock, flags);
	/* Disable TX and RX */
1910 ravb_rcv_snd_disable(ndev);
	/* Modify RX Checksum setting */
1913 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
	/* Enable TX and RX */
1916 ravb_rcv_snd_enable(ndev);
1917
1918 spin_unlock_irqrestore(&priv->lock, flags);
1919}
1920
1921static int ravb_set_features_rx_csum(struct net_device *ndev,
1922 netdev_features_t features)
1923{
1924 netdev_features_t changed = ndev->features ^ features;
1925
1926 if (changed & NETIF_F_RXCSUM)
1927 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
1928
1929 ndev->features = features;
1930
1931 return 0;
1932}
1933
1934static int ravb_set_features(struct net_device *ndev,
1935 netdev_features_t features)
1936{
1937 struct ravb_private *priv = netdev_priv(ndev);
1938 const struct ravb_hw_info *info = priv->info;
1939
1940 return info->set_rx_csum_feature(ndev, features);
1941}
1942
1943static const struct net_device_ops ravb_netdev_ops = {
1944 .ndo_open = ravb_open,
1945 .ndo_stop = ravb_close,
1946 .ndo_start_xmit = ravb_start_xmit,
1947 .ndo_select_queue = ravb_select_queue,
1948 .ndo_get_stats = ravb_get_stats,
1949 .ndo_set_rx_mode = ravb_set_rx_mode,
1950 .ndo_tx_timeout = ravb_tx_timeout,
1951 .ndo_eth_ioctl = ravb_do_ioctl,
1952 .ndo_change_mtu = ravb_change_mtu,
1953 .ndo_validate_addr = eth_validate_addr,
1954 .ndo_set_mac_address = eth_mac_addr,
1955 .ndo_set_features = ravb_set_features,
1956};
/* MDIO bus init function */
1959static int ravb_mdio_init(struct ravb_private *priv)
1960{
1961 struct platform_device *pdev = priv->pdev;
1962 struct device *dev = &pdev->dev;
1963 int error;
	/* Bitbang init */
1966 priv->mdiobb.ops = &bb_ops;
	/* MII controller setting */
1969 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1970 if (!priv->mii_bus)
1971 return -ENOMEM;
	/* Hardware init */
1974 priv->mii_bus->name = "ravb_mii";
1975 priv->mii_bus->parent = dev;
1976 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1977 pdev->name, pdev->id);
	/* Register MDIO bus */
1980 error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1981 if (error)
1982 goto out_free_bus;
1983
1984 return 0;
1985
1986out_free_bus:
1987 free_mdio_bitbang(priv->mii_bus);
1988 return error;
1989}
/* MDIO bus release function */
1992static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
1995 mdiobus_unregister(priv->mii_bus);
	/* Free bitbang info */
1998 free_mdio_bitbang(priv->mii_bus);
1999
2000 return 0;
2001}
2002
2003static const struct ravb_hw_info ravb_gen3_hw_info = {
2004 .rx_ring_free = ravb_rx_ring_free,
2005 .rx_ring_format = ravb_rx_ring_format,
2006 .alloc_rx_desc = ravb_alloc_rx_desc,
2007 .receive = ravb_rcar_rx,
2008 .set_rate = ravb_set_rate,
2009 .set_rx_csum_feature = ravb_set_features_rx_csum,
2010 .dmac_init = ravb_rcar_dmac_init,
2011 .emac_init = ravb_rcar_emac_init,
2012 .gstrings_stats = ravb_gstrings_stats,
2013 .gstrings_size = sizeof(ravb_gstrings_stats),
2014 .net_hw_features = NETIF_F_RXCSUM,
2015 .net_features = NETIF_F_RXCSUM,
2016 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2017 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2018 .internal_delay = 1,
2019 .tx_counters = 1,
2020 .multi_irqs = 1,
2021 .ptp_cfg_active = 1,
2022};
2023
2024static const struct ravb_hw_info ravb_gen2_hw_info = {
2025 .rx_ring_free = ravb_rx_ring_free,
2026 .rx_ring_format = ravb_rx_ring_format,
2027 .alloc_rx_desc = ravb_alloc_rx_desc,
2028 .receive = ravb_rcar_rx,
2029 .set_rate = ravb_set_rate,
2030 .set_rx_csum_feature = ravb_set_features_rx_csum,
2031 .dmac_init = ravb_rcar_dmac_init,
2032 .emac_init = ravb_rcar_emac_init,
2033 .gstrings_stats = ravb_gstrings_stats,
2034 .gstrings_size = sizeof(ravb_gstrings_stats),
2035 .net_hw_features = NETIF_F_RXCSUM,
2036 .net_features = NETIF_F_RXCSUM,
2037 .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2038 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2039 .aligned_tx = 1,
2040 .no_ptp_cfg_active = 1,
2041};
2042
2043static const struct of_device_id ravb_match_table[] = {
2044 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2045 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2046 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2047 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2048 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2049 { }
2050};
2051MODULE_DEVICE_TABLE(of, ravb_match_table);
2052
2053static int ravb_set_gti(struct net_device *ndev)
2054{
2055 struct ravb_private *priv = netdev_priv(ndev);
2056 struct device *dev = ndev->dev.parent;
2057 unsigned long rate;
2058 uint64_t inc;
2059
2060 rate = clk_get_rate(priv->clk);
2061 if (!rate)
2062 return -EINVAL;
2063
2064 inc = 1000000000ULL << 20;
2065 do_div(inc, rate);
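	/* inc now holds the gPTP timer increment per clock cycle, expressed
	 * in 2^-20 ns units as expected by the GTI.TIV field.
	 */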
2066
2067 if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
2068 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
2069 inc, GTI_TIV_MIN, GTI_TIV_MAX);
2070 return -EINVAL;
2071 }
2072
2073 ravb_write(ndev, inc, GTI);
2074
2075 return 0;
2076}
2077
2078static void ravb_set_config_mode(struct net_device *ndev)
2079{
2080 struct ravb_private *priv = netdev_priv(ndev);
2081 const struct ravb_hw_info *info = priv->info;
2082
2083 if (info->no_ptp_cfg_active) {
2084 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
2085
2086 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
2087 } else {
2088 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
2089 CCC_GAC | CCC_CSEL_HPB);
2090 }
2091}
/* Set tx and rx clock internal delay modes */
2094static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
2095{
2096 struct ravb_private *priv = netdev_priv(ndev);
2097 bool explicit_delay = false;
2098 u32 delay;
2099
2100 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
2102 priv->rxcidm = !!delay;
2103 explicit_delay = true;
2104 }
2105 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
2107 priv->txcidm = !!delay;
2108 explicit_delay = true;
2109 }
2110
2111 if (explicit_delay)
2112 return;
	/* Fall back to legacy rgmii-*id behavior */
2115 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2116 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
2117 priv->rxcidm = 1;
2118 priv->rgmii_override = 1;
2119 }
2120
2121 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2122 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
2123 priv->txcidm = 1;
2124 priv->rgmii_override = 1;
2125 }
2126}
2127
2128static void ravb_set_delay_mode(struct net_device *ndev)
2129{
2130 struct ravb_private *priv = netdev_priv(ndev);
2131 u32 set = 0;
2132
2133 if (priv->rxcidm)
2134 set |= APSR_RDM;
2135 if (priv->txcidm)
2136 set |= APSR_TDM;
2137 ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
2138}
2139
2140static int ravb_probe(struct platform_device *pdev)
2141{
2142 struct device_node *np = pdev->dev.of_node;
2143 const struct ravb_hw_info *info;
2144 struct reset_control *rstc;
2145 struct ravb_private *priv;
2146 struct net_device *ndev;
2147 int error, irq, q;
2148 struct resource *res;
2149 int i;
2150
2151 if (!np) {
2152 dev_err(&pdev->dev,
2153 "this driver is required to be instantiated from device tree\n");
2154 return -EINVAL;
2155 }
2156
2157 rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
2158 if (IS_ERR(rstc))
2159 return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2160 "failed to get cpg reset\n");
2161
2162 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2163 NUM_TX_QUEUE, NUM_RX_QUEUE);
2164 if (!ndev)
2165 return -ENOMEM;
2166
2167 info = of_device_get_match_data(&pdev->dev);
2168
2169 ndev->features = info->net_features;
2170 ndev->hw_features = info->net_hw_features;
2171
2172 reset_control_deassert(rstc);
2173 pm_runtime_enable(&pdev->dev);
2174 pm_runtime_get_sync(&pdev->dev);
2175
2176 if (info->multi_irqs)
2177 irq = platform_get_irq_byname(pdev, "ch22");
2178 else
2179 irq = platform_get_irq(pdev, 0);
2180 if (irq < 0) {
2181 error = irq;
2182 goto out_release;
2183 }
2184 ndev->irq = irq;
2185
2186 SET_NETDEV_DEV(ndev, &pdev->dev);
2187
2188 priv = netdev_priv(ndev);
2189 priv->info = info;
2190 priv->rstc = rstc;
2191 priv->ndev = ndev;
2192 priv->pdev = pdev;
2193 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2194 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2195 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2196 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2197 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2198 if (IS_ERR(priv->addr)) {
2199 error = PTR_ERR(priv->addr);
2200 goto out_release;
2201 }
	/* The Ether-specific entries in the device structure. */
2204 ndev->base_addr = res->start;
2205
	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	error = of_get_phy_mode(np, &priv->phy_interface);
	if (error && error != -ENODEV)
		goto out_release;

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (info->multi_irqs) {
		irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		goto out_release;
	}

	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
	if (IS_ERR(priv->refclk)) {
		error = PTR_ERR(priv->refclk);
		goto out_release;
	}
	clk_prepare_enable(priv->refclk);

	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* SoCs with a TX buffer alignment restriction (aligned_tx) use two
	 * descriptors per packet, so that the data buffer can be split into
	 * an aligned part and the remaining unaligned part.
	 */
	priv->num_tx_desc = info->aligned_tx ? 2 : 1;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	error = ravb_set_gti(ndev);
	if (error)
		goto out_disable_refclk;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	if (info->internal_delay) {
		ravb_parse_delay_mode(np, ndev);
		ravb_set_delay_mode(ndev);
	}

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_disable_refclk;
	}
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver */
	if (info->ptp_cfg_active)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	device_set_wakeup_capable(&pdev->dev, 1);

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (info->ptp_cfg_active)
		ravb_ptp_stop(ndev);
out_disable_refclk:
	clk_disable_unprepare(priv->refclk);
out_release:
	free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(rstc);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;

	/* Stop PTP Clock driver */
	if (info->ptp_cfg_active)
		ravb_ptp_stop(ndev);

	clk_disable_unprepare(priv->refclk);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

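/* Arm the hardware for MagicPacket Wake-on-LAN: mask all DMA interrupts,
 * leave only the magic packet detection interrupt enabled in the E-MAC and
 * make the E-MAC interrupt wakeup-capable.
 */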
static int ravb_wol_setup(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);

	/* Enable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);

	return enable_irq_wake(priv->emac_irq);
}

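/* Undo ravb_wol_setup(): re-enable NAPI, turn MagicPacket detection off,
 * close the device and drop the E-MAC interrupt as a wakeup source.
 */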
static int ravb_wol_restore(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);

	/* Disable MagicPacket */
	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);

	ret = ravb_close(ndev);
	if (ret < 0)
		return ret;

	return disable_irq_wake(priv->emac_irq);
}

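/* System suspend: detach the interface, then either arm Wake-on-LAN or fully
 * close the device, depending on whether WoL is enabled.
 */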
static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	if (priv->wol_enabled)
		ret = ravb_wol_setup(ndev);
	else
		ret = ravb_close(ndev);

	return ret;
}

static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	int ret = 0;

	/* If WoL is enabled, set reset mode to rearm the WoL logic. */
	if (priv->wol_enabled)
		ravb_write(ndev, CCC_OPC_RESET, CCC);

	/* All registers have been reset to their default values.  Restore all
	 * registers that were set up at probe time and reopen the device if
	 * it was running before the system was suspended.
	 */

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	ret = ravb_set_gti(ndev);
	if (ret)
		return ret;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	if (info->internal_delay)
		ravb_set_delay_mode(ndev);

	/* Restore descriptor base address table */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (netif_running(ndev)) {
		if (priv->wol_enabled) {
			ret = ravb_wol_restore(ndev);
			if (ret)
				return ret;
		}
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}

static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend() and
	 * ->runtime_resume().  Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway, so there is no need to save and
	 * restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name		= "ravb",
		.pm		= &ravb_dev_pm_ops,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");