// SPDX-License-Identifier: GPL-2.0
/* Driver for the National Instruments XGE management MAC
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

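/* Number of buffer descriptors in the Tx and Rx DMA rings */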
#define TX_BD_NUM 64
#define RX_BD_NUM 128

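/* AXI DMA channel register offsets, relative to the DMA register window */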
#define XAXIDMA_TX_CR_OFFSET 0x00
#define XAXIDMA_TX_SR_OFFSET 0x04
#define XAXIDMA_TX_CDESC_OFFSET 0x08
#define XAXIDMA_TX_TDESC_OFFSET 0x10

#define XAXIDMA_RX_CR_OFFSET 0x30
#define XAXIDMA_RX_SR_OFFSET 0x34
#define XAXIDMA_RX_CDESC_OFFSET 0x38
#define XAXIDMA_RX_TDESC_OFFSET 0x40

#define XAXIDMA_CR_RUNSTOP_MASK 0x1
#define XAXIDMA_CR_RESET_MASK 0x4

#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000
#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000

#define XAXIDMA_DELAY_MASK 0xFF000000
#define XAXIDMA_COALESCE_MASK 0x00FF0000

#define XAXIDMA_DELAY_SHIFT 24
#define XAXIDMA_COALESCE_SHIFT 16

#define XAXIDMA_IRQ_IOC_MASK 0x00001000
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000
#define XAXIDMA_IRQ_ALL_MASK 0x00007000

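/* Default interrupt coalescing threshold (frames) and waitbound (delay timer) values */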
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
#define XAXIDMA_DFT_RX_THRESHOLD 24
#define XAXIDMA_DFT_RX_WAITBOUND 254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF
#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000
#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000
#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000
#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000
#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000
#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000
#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000
#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000

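/* NIXGE control/MAC registers. NIXGE_REG_CTRL_OFFSET locates this window
 * within the mapped region; the NIXGE_REG_* offsets below are relative to it.
 */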
#define NIXGE_REG_CTRL_OFFSET 0x4000
#define NIXGE_REG_INFO 0x00
#define NIXGE_REG_MAC_CTL 0x04
#define NIXGE_REG_PHY_CTL 0x08
#define NIXGE_REG_LED_CTL 0x0c
#define NIXGE_REG_MDIO_DATA 0x10
#define NIXGE_REG_MDIO_ADDR 0x14
#define NIXGE_REG_MDIO_OP 0x18
#define NIXGE_REG_MDIO_CTRL 0x1c

#define NIXGE_ID_LED_CTL_EN BIT(0)
#define NIXGE_ID_LED_CTL_VAL BIT(1)

#define NIXGE_MDIO_CLAUSE45 BIT(12)
#define NIXGE_MDIO_CLAUSE22 0
#define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS 0
#define NIXGE_MDIO_C45_WRITE BIT(0)
#define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE BIT(0)
#define NIXGE_MDIO_C22_READ BIT(1)
#define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB 0x1000
#define NIXGE_REG_MAC_MSB 0x1004

/* Packet size info: Ethernet header, trailer (FCS) and supported MTUs */
#define NIXGE_HDR_SIZE 14
#define NIXGE_TRL_SIZE 4
#define NIXGE_MTU 1500
#define NIXGE_JUMBO_MTU 9000

#define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

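/* Hardware buffer descriptor; the layout appears to mirror the AXI DMA
 * scatter/gather descriptor format. sw_id_offset is used by the driver to
 * stash the sk_buff pointer belonging to an Rx descriptor (32-bit only).
 */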
struct nixge_hw_dma_bd {
	u32 next;
	u32 reserved1;
	u32 phys;
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to the PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;

	/* IO register windows, DMA error tasklet and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;
	u32 last_link;

	/* Buffer descriptor rings */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

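/* Simple MMIO accessors for the DMA and control register windows */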
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

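/* Unmap and free all Rx buffers, then release both descriptor rings */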
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent,
					 priv->rx_bd_v[i].phys,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (priv->rx_bd_v[i].sw_id_offset));
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

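/* Allocate and initialize the Tx/Rx descriptor rings, pre-fill the Rx ring
 * with receive buffers, program interrupt coalescing and start both DMA
 * channels.
 */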
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptor rings */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	/* Link the Tx descriptors into a ring */
	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	/* Link the Rx descriptors into a ring and attach a buffer to each */
	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Program the Rx channel: coalesce count, delay timer, interrupts */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);

	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));

	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));

	cr |= XAXIDMA_IRQ_ALL_MASK;

	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Program the Tx channel: coalesce count, delay timer, interrupts */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);

	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));

	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));

	cr |= XAXIDMA_IRQ_ALL_MASK;

	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the Rx current-descriptor register, start the channel,
	 * then advance the tail pointer so the engine can begin receiving.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Populate the Tx current-descriptor register and start the channel;
	 * the tail pointer is only written once a frame is queued for
	 * transmission.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

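/* Reset one AXI DMA channel and wait for the engine to clear the reset bit */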
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

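/* Reclaim completed Tx descriptors: unmap buffers, free skbs, update stats
 * and wake the queue if any slots were released.
 */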
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

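/* Check whether enough free descriptors remain for a frame with num_frag
 * fragments; returns NETDEV_TX_BUSY if the slot we would wrap into is still
 * owned by the hardware.
 */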
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

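/* Queue a frame for transmission: map the linear part and each page
 * fragment into consecutive descriptors, mark SOF/EOF, then bump the Tx
 * tail pointer to kick the DMA engine.
 */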
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* The last descriptor of the frame owns the skb */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;

	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

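/* Receive up to @budget frames: hand completed buffers to the stack via
 * NAPI GRO, attach a fresh buffer to each recycled descriptor and advance
 * the Rx tail pointer.
 */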
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(cur_p->sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* No checksum offload in this datapath; let the stack
		 * verify the checksum.
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: no recovery if the new buffer cannot be mapped */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

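/* NAPI poll: process received frames, then either re-arm the Rx interrupts
 * or reschedule if more completions arrived while polling.
 */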
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* More completions arrived while polling: ack the
			 * status and poll again.
			 */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* All caught up: re-enable the Rx completion and
			 * delay interrupts.
			 */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

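/* Tx and Rx DMA interrupt handlers. On errors, both channels have their
 * interrupts masked and the DMA error tasklet is scheduled to reset and
 * reprogram the engine.
 */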
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		/* Mask all interrupts on both channels and let the error
		 * tasklet recover the engine.
		 */
		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Ack the interrupt, mask Rx interrupts and hand off to NAPI */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		/* Mask all interrupts on both channels and let the error
		 * tasklet recover the engine.
		 */
		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

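/* DMA error tasklet: reset both channels, scrub the descriptor rings,
 * restore the coalescing defaults and restart the engine.
 */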
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Restore the Rx channel defaults: coalesce count, delay timer,
	 * interrupts enabled.
	 */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);

	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));

	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));

	cr |= XAXIDMA_IRQ_ALL_MASK;

	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Restore the Tx channel defaults likewise */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);

	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));

	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));

	cr |= XAXIDMA_IRQ_ALL_MASK;

	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Restart the Rx channel and re-arm the tail pointer */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Restart the Tx channel; the tail pointer is written on the next
	 * transmit.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

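/* Bring the interface up: reset the hardware, connect the PHY, enable NAPI
 * and request the Tx/Rx DMA interrupts.
 */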
static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;

	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

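/* The MAC address is split across two registers: bytes 2-5 go into the LSB
 * register, bytes 0-1 into the MSB register.
 */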
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu = nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	/* Only frame-count based coalescing is supported */
	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal LED behavior */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo = nixge_ethtools_get_drvinfo,
	.get_coalesce = nixge_ethtools_get_coalesce,
	.set_coalesce = nixge_ethtools_set_coalesce,
	.set_phys_id = nixge_ethtools_set_phys_id,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
};

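/* MDIO access helpers. Clause 45 transactions first latch the register
 * address with an ADDRESS operation, then issue the read or write; Clause 22
 * transactions encode the register number directly in the MMD field.
 */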
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

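/* Try to read the MAC address from an nvmem cell named "address"; the
 * caller falls back to a random address if this returns NULL.
 */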
static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		err = PTR_ERR(priv->dma_regs);
		goto free_netdev;
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus\n");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -EINVAL;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe = nixge_probe,
	.remove = nixge_remove,
	.driver = {
		.name = "nixge",
		.of_match_table = of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");