// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

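/* DMA engine parameters */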
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

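/* CPU port MAC (PMAC) register offsets and bits */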
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from DMA to PMAC */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from PMAC to DMA */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

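/* The RX buffer must hold a full VLAN-tagged frame and is rounded up to a
 * multiple of the DMA burst size (4 bytes * XRX200_DMA_BURST_LEN). The skb
 * size adds NET_SKB_PAD/NET_IP_ALIGN headroom and the shared info needed by
 * build_skb().
 */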
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

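/* Hand every already-completed RX descriptor back to the DMA engine so the
 * channel starts out with a fully armed ring.
 */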
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* Frames may already be sitting in the hardware queues (e.g. left
	 * there by the boot loader) before the interrupt is enabled. Give
	 * the DMA engine a moment, return any completed descriptors to it
	 * and only then enable the RX interrupt.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

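/* Allocate and DMA-map a fresh page-fragment buffer for the current RX
 * descriptor and hand the descriptor back to the hardware. On failure the
 * descriptor keeps its previous buffer address and -ENOMEM is returned.
 */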
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;

	/* Make sure the buffer address is written before the descriptor is
	 * handed back to the hardware.
	 */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

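/* Process one completed RX descriptor: wrap its buffer in an skb and either
 * start a new packet (SOP) or chain it to the packet in progress via the
 * frag_list until the end-of-packet (EOP) descriptor is seen.
 */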
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		/* build_skb() can fail under memory pressure; drop the frame */
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to the skb chain via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}

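/* NAPI RX poll: consume up to @budget completed packets and re-enable the
 * channel interrupt once the ring has been drained.
 */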
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				return ret;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

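/* TX completion NAPI poll: release transmitted skbs, update the statistics
 * and BQL accounting, and wake the queue if it was stopped.
 */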
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

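/* Queue one skb on the TX DMA channel. The descriptor address must start on
 * a burst-length boundary, so the offset of the data within the first burst
 * is carried in the descriptor's byte offset field.
 */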
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* DMA transfers must start on a burst-length boundary */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;

	/* Make sure the address is written before the descriptor is handed
	 * to the hardware.
	 */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

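/* Changing the MTU only needs real work when it grows: every RX buffer in
 * the ring has to be reallocated at the new size while the channel is
 * quiesced. Shrinking keeps the existing (larger) buffers.
 */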
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open = xrx200_open,
	.ndo_stop = xrx200_close,
	.ndo_start_xmit = xrx200_start_xmit,
	.ndo_change_mtu = xrx200_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

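/* Per-channel DMA interrupt handler: mask the channel interrupt, schedule
 * NAPI for that channel and acknowledge the interrupt.
 */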
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

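/* Set up the DMA port, allocate the RX and TX descriptor rings, pre-fill the
 * RX ring with buffers and request both channel interrupts.
 */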
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

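/* Release both DMA channels and free every RX buffer in the ring. */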
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* map the PMAC register range and look up the DMA channel IRQs */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the DMA engine */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable the clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set the RX inter-packet gap */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header and special tag handling and CRC
	 * insertion/removal on the CPU port
	 */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
		       NAPI_POLL_WEIGHT);
	netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping,
			  NAPI_POLL_WEIGHT);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");