// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8
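/* A burst length of 8 appears to mean the DMA engine moves eight 32-bit
 * words (32 bytes) per burst: both the RX buffer rounding and the TX
 * byte-offset calculation below are derived from 4 * XRX200_DMA_BURST_LEN.
 */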

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from DMA to PMAC */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

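/* One DMA channel: the NAPI context, the descriptor ring shared with the
 * hardware and a shadow array recording which skb backs each descriptor.
 * On the TX channel, tx_free is the reclaim index trailing behind dma.desc.
 */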
struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

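/* Worked example, assuming the standard Ethernet MTU of 1500: the maximum
 * frame is VLAN_ETH_HLEN (18) + 1500 = 1518 bytes, which rounds up to the
 * next multiple of 4 * XRX200_DMA_BURST_LEN (32 bytes), i.e. 1536 bytes
 * per RX buffer.
 */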
static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

/* drop all the packets from the DMA ring */
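/* A descriptor with LTQ_DMA_C set but LTQ_DMA_OWN cleared carries a
 * completed frame that nobody will consume (e.g. one received while the
 * interface was down). Hand such descriptors straight back to the hardware
 * instead of passing the data up the stack.
 */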
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

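/* Attach a fresh skb to the current RX descriptor. On DMA mapping failure
 * the old skb is put back; in either error path the descriptor control word
 * is still re-armed via the skip label so the hardware can keep using the
 * ring.
 */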
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
							  priv->rx_buf_size);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

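/* Pass one completed RX descriptor up the stack. A replacement buffer is
 * allocated first; if that fails the frame is dropped and counted, trading
 * packet loss for ring integrity.
 */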
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	int ret;

	ret = xrx200_alloc_skb(ch);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_receive_skb(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += len;

	return 0;
}

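/* NAPI RX poll: consume up to budget completed descriptors. When the ring
 * runs dry before the budget is spent, polling is completed and the RX
 * interrupt is re-enabled.
 */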
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret)
				return ret;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

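/* NAPI TX completion: reclaim transmitted skbs under the xmit lock, credit
 * the packet/byte counts to byte queue limits via netdev_completed_queue()
 * and wake the queue if xmit had stopped it because the ring was full.
 */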
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

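/* Transmit one frame on a single descriptor. Short frames are padded to
 * ETH_ZLEN first; the DMA address is aligned down to a burst boundary and
 * the remainder is encoded as the descriptor's byte offset.
 */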
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to permit any address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

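/* Growing the MTU means every RX buffer may be too small, so drain the ring
 * and reallocate each buffer at the new size; shrinking needs no
 * reallocation. If an allocation fails midway, the old MTU and buffer size
 * are restored.
 */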
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	struct sk_buff *skb;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		skb = ch_rx->skb[ch_rx->dma.desc];
		ret = xrx200_alloc_skb(ch_rx);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			break;
		}
		dev_kfree_skb_any(skb);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open = xrx200_open,
	.ndo_stop = xrx200_close,
	.ndo_start_xmit = xrx200_start_xmit,
	.ndo_change_mtu = xrx200_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

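/* Channel interrupt: mask further interrupts for this channel and defer the
 * real work to its NAPI handler, then acknowledge the interrupt at the DMA
 * core.
 */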
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

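/* Set up both DMA channels: configure the port burst lengths, allocate the
 * RX ring and pre-fill it with buffers, then request the per-channel IRQs.
 * The error paths unwind in reverse order.
 */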
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_skb(ch_rx);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			dev_kfree_skb_any(priv->chan_rx.skb[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		dev_kfree_skb_any(priv->chan_rx.skb[i]);
}

static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
	netif_tx_napi_add(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping, 32);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");