/* MOXA ART (RTL8201CP) Ethernet driver.
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/circ_buf.h>

#include "moxart_ether.h"

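/*
 * Descriptor words live in coherent DMA memory and are laid out little-endian
 * as seen by the MAC, so all ring accesses go through these helpers instead
 * of touching the descriptor memory directly.
 */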
static inline void moxart_desc_write(u32 data, u32 *desc)
{
	*desc = cpu_to_le32(data);
}

static inline u32 moxart_desc_read(u32 *desc)
{
	return le32_to_cpu(*desc);
}

static inline void moxart_emac_write(struct net_device *ndev,
				     unsigned int reg, unsigned long value)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(value, priv->base + reg);
}

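/*
 * The station address is split across two registers: the two most
 * significant bytes go into REG_MAC_MS_ADDRESS and the remaining four bytes
 * into the following word.
 */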
static void moxart_update_mac_address(struct net_device *ndev)
{
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
			  ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
			  ((ndev->dev_addr[2] << 24) |
			   (ndev->dev_addr[3] << 16) |
			   (ndev->dev_addr[4] << 8) |
			   (ndev->dev_addr[5])));
}

static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
	moxart_update_mac_address(ndev);

	return 0;
}

static void moxart_mac_free_memory(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
				 priv->rx_buf_size, DMA_FROM_DEVICE);

	if (priv->tx_desc_base)
		dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
				  priv->tx_desc_base, priv->tx_base);

	if (priv->rx_desc_base)
		dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
				  priv->rx_desc_base, priv->rx_base);

	kfree(priv->tx_buf_base);
	kfree(priv->rx_buf_base);
}

static void moxart_mac_reset(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(SW_RST, priv->base + REG_MAC_CTRL);
	while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
		mdelay(10);

	writel(0, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
}

static void moxart_mac_enable(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
	writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
	writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);

	priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
}

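/*
 * (Re)initialise the TX and RX descriptor rings.  Each descriptor carries a
 * status/ownership word (DESC0), a control word holding the buffer size and
 * an END flag that marks the last ring entry (DESC1), and the physical and
 * virtual buffer addresses (DESC2).  RX descriptors are handed to the DMA
 * engine immediately via RX_DESC0_DMA_OWN; TX descriptors remain CPU-owned
 * until a packet is queued in moxart_mac_start_xmit().
 */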
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void *desc;
	int i;

	for (i = 0; i < TX_DESC_NUM; i++) {
		desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
		memset(desc, 0, TX_REG_DESC_SIZE);

		priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
	}
	/* mark the last TX descriptor so the controller wraps to the head */
	moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);

	priv->tx_head = 0;
	priv->tx_tail = 0;

	for (i = 0; i < RX_DESC_NUM; i++) {
		desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
		memset(desc, 0, RX_REG_DESC_SIZE);
		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
		moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
				  desc + RX_REG_OFFSET_DESC1);

		priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
		priv->rx_mapping[i] = dma_map_single(&ndev->dev,
						     priv->rx_buf[i],
						     priv->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
			netdev_err(ndev, "DMA mapping error\n");

		moxart_desc_write(priv->rx_mapping[i],
				  desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
		moxart_desc_write((uintptr_t)priv->rx_buf[i],
				  desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
	}
	moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);

	priv->rx_head = 0;

	/* reset the MAC controller TX/RX descriptor base address */
	writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
	writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
}

static int moxart_mac_open(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&priv->napi);

	moxart_mac_reset(ndev);
	moxart_update_mac_address(ndev);
	moxart_mac_setup_desc_ring(ndev);
	moxart_mac_enable(ndev);
	netif_start_queue(ndev);

	netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
		   __func__, readl(priv->base + REG_INTERRUPT_MASK),
		   readl(priv->base + REG_MAC_CTRL));

	return 0;
}

static int moxart_mac_stop(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	netif_stop_queue(ndev);

	/* disable all interrupts */
	writel(0, priv->base + REG_INTERRUPT_MASK);

	/* disable all functions */
	writel(0, priv->base + REG_MAC_CTRL);

	return 0;
}

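/*
 * NAPI poll: drain completed RX descriptors, up to the given budget.  A
 * descriptor still owned by the DMA engine (RX_DESC0_DMA_OWN) marks the end
 * of the completed packets.  Each received frame is copied into a freshly
 * allocated skb and the descriptor is handed straight back to the hardware.
 */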
static int moxart_rx_poll(struct napi_struct *napi, int budget)
{
	struct moxart_mac_priv_t *priv = container_of(napi,
						      struct moxart_mac_priv_t,
						      napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	void *desc;
	unsigned int desc0, len;
	int rx_head = priv->rx_head;
	int rx = 0;

	while (rx < budget) {
		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
		desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
		rmb(); /* ensure desc0 is up to date */

		if (desc0 & RX_DESC0_DMA_OWN)
			break;

		if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
			     RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
			net_dbg_ratelimited("packet error\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto rx_next;
		}

		len = desc0 & RX_DESC0_FRAME_LEN_MASK;

		if (len > RX_BUF_SIZE)
			len = RX_BUF_SIZE;

		dma_sync_single_for_cpu(&ndev->dev,
					priv->rx_mapping[rx_head],
					priv->rx_buf_size, DMA_FROM_DEVICE);
		skb = netdev_alloc_skb_ip_align(ndev, len);

		if (unlikely(!skb)) {
			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto rx_next;
		}

		memcpy(skb->data, priv->rx_buf[rx_head], len);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);
		rx++;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		if (desc0 & RX_DESC0_MULTICAST)
			ndev->stats.multicast++;

rx_next:
		wmb(); /* order prior accesses before returning ownership */
		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);

		rx_head = RX_NEXT(rx_head);
		priv->rx_head = rx_head;
	}

	if (rx < budget)
		napi_complete_done(napi, rx);

	priv->reg_imr |= RPKT_FINISH_M;
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	return rx;
}

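/*
 * The TX ring is tracked with head/tail indices: the xmit path advances
 * tx_head, the completion path advances tx_tail, and CIRC_SPACE() reports
 * how many descriptors remain free.
 */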
static int moxart_tx_queue_space(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
}

static void moxart_tx_finished(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int tx_head = priv->tx_head;
	unsigned int tx_tail = priv->tx_tail;

	while (tx_tail != tx_head) {
		dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
				 priv->tx_len[tx_tail], DMA_TO_DEVICE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;

		dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;

		tx_tail = TX_NEXT(tx_tail);
	}
	priv->tx_tail = tx_tail;
	if (netif_queue_stopped(ndev) &&
	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
		netif_wake_queue(ndev);
}

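/*
 * Interrupt handler: TX completions are reaped directly in hard-irq context,
 * while RX work is deferred to NAPI with the RX interrupt masked until the
 * poll routine has drained the ring and re-enables it.
 */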
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);

	if (ists & XPKT_OK_INT_STS)
		moxart_tx_finished(ndev);

	if (ists & RPKT_FINISH) {
		if (napi_schedule_prep(&priv->napi)) {
			priv->reg_imr &= ~RPKT_FINISH_M;
			writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}

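/*
 * Queue one packet for transmission: map the skb, fill in the descriptor,
 * transfer ownership to the DMA engine and then write REG_TX_POLL_DEMAND,
 * which appears to prompt the controller to rescan the TX ring.
 */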
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void *desc;
	unsigned int len;
	unsigned int tx_head;
	u32 txdes1;
	int ret = NETDEV_TX_BUSY;

	spin_lock_irq(&priv->txlock);

	tx_head = priv->tx_head;
	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);

	if (moxart_tx_queue_space(ndev) == 1)
		netif_stop_queue(ndev);

	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
		net_dbg_ratelimited("no TX space for packet\n");
		ndev->stats.tx_dropped++;
		goto out_unlock;
	}
	rmb(); /* ensure the ownership check precedes later descriptor reads */

	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;

	priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
						   len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
		netdev_err(ndev, "DMA mapping error\n");
		goto out_unlock;
	}

	priv->tx_len[tx_head] = len;
	priv->tx_skb[tx_head] = skb;

	moxart_desc_write(priv->tx_mapping[tx_head],
			  desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
	moxart_desc_write((uintptr_t)skb->data,
			  desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);

	if (skb->len < ETH_ZLEN) {
		memset(&skb->data[skb->len],
		       0, ETH_ZLEN - skb->len);
		len = ETH_ZLEN;
	}

	dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
				   priv->tx_buf_size, DMA_TO_DEVICE);

	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
	if (tx_head == TX_DESC_NUM_MASK)
		txdes1 |= TX_DESC1_END;
	moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
	wmb(); /* flush descriptor before transferring ownership */
	moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);

	/* start to send packet */
	writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);

	priv->tx_head = TX_NEXT(tx_head);

	netif_trans_update(ndev);
	ret = NETDEV_TX_OK;
out_unlock:
	spin_unlock_irq(&priv->txlock);

	return ret;
}

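/*
 * Multicast filtering uses a 64-bit hash: the top six bits of the CRC-32 of
 * each multicast address select one bit across the two 32-bit hash-table
 * registers.
 */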
static void moxart_mac_setmulticast(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int crc_val;

	netdev_for_each_mc_addr(ha, ndev) {
		crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
		crc_val = (crc_val >> 26) & 0x3f;
		if (crc_val >= 32) {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
			       (1UL << (crc_val - 32)),
			       priv->base + REG_MCAST_HASH_TABLE1);
		} else {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
			       (1UL << crc_val),
			       priv->base + REG_MCAST_HASH_TABLE0);
		}
	}
}

static void moxart_mac_set_rx_mode(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	spin_lock_irq(&priv->txlock);

	if (ndev->flags & IFF_PROMISC)
		priv->reg_maccr |= RCV_ALL;
	else
		priv->reg_maccr &= ~RCV_ALL;

	if (ndev->flags & IFF_ALLMULTI)
		priv->reg_maccr |= RX_MULTIPKT;
	else
		priv->reg_maccr &= ~RX_MULTIPKT;

	if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
		priv->reg_maccr |= HT_MULTI_EN;
		moxart_mac_setmulticast(ndev);
	} else {
		priv->reg_maccr &= ~HT_MULTI_EN;
	}

	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);

	spin_unlock_irq(&priv->txlock);
}

static const struct net_device_ops moxart_netdev_ops = {
	.ndo_open		= moxart_mac_open,
	.ndo_stop		= moxart_mac_stop,
	.ndo_start_xmit		= moxart_mac_start_xmit,
	.ndo_set_rx_mode	= moxart_mac_set_rx_mode,
	.ndo_set_mac_address	= moxart_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

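/*
 * Probe: map the MMIO registers described by the device tree node, allocate
 * the coherent descriptor rings and the packet buffer areas, request the MAC
 * interrupt and finally register the net_device.
 */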
static int moxart_mac_probe(struct platform_device *pdev)
{
	struct device *p_dev = &pdev->dev;
	struct device_node *node = p_dev->of_node;
	struct net_device *ndev;
	struct moxart_mac_priv_t *priv;
	struct resource *res;
	unsigned int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
	if (!ndev)
		return -ENOMEM;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		netdev_err(ndev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto irq_map_fail;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(p_dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(p_dev, "devm_ioremap_resource failed\n");
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}
	ndev->base_addr = res->start;

	spin_lock_init(&priv->txlock);

	priv->tx_buf_size = TX_BUF_SIZE;
	priv->rx_buf_size = RX_BUF_SIZE;

	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
						TX_DESC_NUM, &priv->tx_base,
						GFP_DMA | GFP_KERNEL);
	if (!priv->tx_desc_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
						RX_DESC_NUM, &priv->rx_base,
						GFP_DMA | GFP_KERNEL);
	if (!priv->rx_desc_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
					  GFP_ATOMIC);
	if (!priv->tx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
					  GFP_ATOMIC);
	if (!priv->rx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	platform_set_drvdata(pdev, ndev);

	ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
			       pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	ndev->netdev_ops = &moxart_netdev_ops;
	netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto init_fail;

	netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
		   __func__, ndev->irq, ndev->dev_addr);

	return 0;

init_fail:
	netdev_err(ndev, "init failed\n");
	moxart_mac_free_memory(ndev);
irq_map_fail:
	free_netdev(ndev);
	return ret;
}

static int moxart_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	/* the IRQ is devm-managed, so it must not be freed by hand here */
	unregister_netdev(ndev);
	moxart_mac_free_memory(ndev);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id moxart_mac_match[] = {
	{ .compatible = "moxa,moxart-mac" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mac_match);

static struct platform_driver moxart_mac_driver = {
	.probe	= moxart_mac_probe,
	.remove	= moxart_remove,
	.driver	= {
		.name		= "moxart-ethernet",
		.of_match_table	= moxart_mac_match,
	},
};
module_platform_driver(moxart_mac_driver);

MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");