/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB

#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, 0644);

module_param(debug, int, 0644);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies the module parameters and replaces any wrong
 * value with the default one.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}

static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case of we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * sxgbe_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing the private data
 * Description:
 *  If there is no data transfer and we are not already in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
 */
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
	struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}
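
/**
 * sxgbe_eee_init - init the EEE support
 * @priv: private device pointer
 * Description:
 *  If EEE support has been enabled while configuring the driver,
 *  if the HW capability register reports EEE and the PHY can also
 *  manage EEE, enable the LPI state and start the timer that verifies
 *  whether the TX path can enter the LPI state.
 */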
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	struct net_device *ndev = priv->dev;
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1))
			return false;

		priv->eee_active = 1;
		timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}

static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	struct net_device *ndev = priv->dev;

	/* When the EEE has been already initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
}
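
/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the
 * csr clock input.
 */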
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdc communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}

/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	((x)->dma_tx_size / 4)

/* Number of free descriptors in the TX ring. cur_tx and dirty_tx are
 * free-running counters; e.g. with tx_qsize = 512, cur_tx = 600 and
 * dirty_tx = 200, 111 descriptors remain (200 + 512 - 600 - 1).
 */
static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}
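
/**
 * sxgbe_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: called by the PHY abstraction layer to communicate the link
 * status. Since the device does not support autonegotiation or half duplex,
 * only the speed and the link state are handled here.
 */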
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* SXGBE is not supporting auto-negotiation and
	 * half duplex mode, so duplex changes are not handled
	 * in this function; only speed and link status are.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}
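
/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the MAC driver.
 * Return value: 0 on success, a negative errno otherwise.
 */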
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop advertising 1000BASE capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phy_set_max_speed(phydev, SPEED_1000);

	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	return 0;
}
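
/**
 * sxgbe_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * on all queues.
 */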
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the RX descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	/* Clear the TX descriptors */
	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}

static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}

/**
 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
 * @dev: net device structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @dma_buf_sz: size of the mapped buffer
 * @rx_ring: ring whose buffer is freed
 * Description: this function frees the skb of a RX ring entry and unmaps
 * its DMA buffer.
 */
static void sxgbe_free_rx_buffers(struct net_device *dev,
				  struct sxgbe_rx_norm_desc *p, int i,
				  unsigned int dma_buf_sz,
				  struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	kfree_skb(rx_ring->rx_skbuff[i]);
	dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
			 dma_buf_sz, DMA_FROM_DEVICE);
}
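
/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function allocates and initializes the DMA TX
 * descriptor ring.
 */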
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_alloc_coherent(dev,
					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					     &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff arrays */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}

/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: net device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the RX descriptor ring.
 */
static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
			 int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}
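
/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function allocates and initializes the DMA RX
 * descriptor ring and its socket buffers.
 */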
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		return -ENOMEM;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					     &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		return -ENOMEM;

	/* allocate memory for RX skbuff arrays */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (!rx_ring->rx_skbuff_dma) {
		ret = -ENOMEM;
		goto err_free_dma_rx;
	}

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (!rx_ring->rx_skbuff) {
		ret = -ENOMEM;
		goto err_free_skbuff_dma;
	}

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_free_rx_buffers;
	}

	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
	priv->dma_buf_sz = bfsize;

	return 0;

err_free_rx_buffers:
	while (--desc_index >= 0) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
	}
	kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
	kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);

	return ret;
}

/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: net device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the TX descriptor ring.
 */
static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
			 int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}
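
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers for all queues.
 */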
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed during cleaning of the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed during cleaning of the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}

static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		tx_free_ring_skbufs(tqueue);
	}
}

static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}

static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue),
						    GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue),
						    GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}
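
/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: TX/RX MTL thresholds
 * or Store-And-Forward capability.
 */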
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}
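
/**
 * sxgbe_tx_queue_clean - reclaim TX resources
 * @tqueue: queue pointer
 * Description: it reclaims the resources of completed transmissions and
 * wakes the queue if it was stopped and enough descriptors are free again.
 */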
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	__netif_tx_lock(dev_txq, smp_processor_id());

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		if (netif_msg_tx_done(priv))
			pr_debug("%s: restart transmit\n", __func__);
		netif_tx_wake_queue(dev_txq);
	}

	__netif_tx_unlock(dev_txq);
}

/**
 * sxgbe_tx_all_clean - reclaim TX resources on all queues
 * @priv: driver private structure
 * Description: it reclaims resources on all TX queues and, if EEE is
 * enabled and the TX path is idle, re-arms the LPI timer.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}
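
/**
 * sxgbe_restart_tx_queue - irq TX error management function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */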
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}

/**
 * sxgbe_reset_all_tx_queues - irq TX error management function
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of net device, resetting of all queues
	 * may not be the proper way; revisit this later if needed.
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}
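
/**
 * sxgbe_get_hw_features - get XMAC capabilities from the HW cap. registers.
 * @priv: driver private structure
 * Description: it reads the three hardware capability registers and fills
 * the driver's hw_cap structure with the optional features/functions that
 * the controller reports.
 */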
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read capability register 0 */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read capability register 1 */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read capability register 2 */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}

/**
 * sxgbe_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description: it verifies if the MAC address is valid; in case of failure
 * it generates a random MAC address.
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}
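
/**
 * sxgbe_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description: it inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform; in case these
 * are not passed, defaults are used.
 */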
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}

/**
 * sxgbe_init_mtl_engine - MTL init.
 * @priv: driver private structure
 * Description: it inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine - MTL disable.
 * @priv: driver private structure
 * Description: it disables the MTL queues by invoking the specific SXGBE
 * callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}

/**
 * sxgbe_tx_timer - mitigation SW timer for TX.
 * @t: timer pointer
 * Description: this is the timer handler that directly invokes
 * sxgbe_tx_queue_clean.
 */
static void sxgbe_tx_timer(struct timer_list *t)
{
	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);

	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce - init TX mitigation options.
 * @priv: driver private structure
 * Description: this inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the interrupt on
 * completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		timer_setup(&p->txtimer, sxgbe_tx_timer, 0);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		add_timer(&p->txtimer);
	}
}

static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		del_timer_sync(&p->txtimer);
	}
}
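
/**
 * sxgbe_open - open entry point of the driver
 * @dev: pointer to the device structure.
 * Description: this function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */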
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	ret = init_dma_desc_rings(dev);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto dma_desc_error;
	}

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* initialize the core component */
	priv->hw->mac->core_init(priv->ioaddr);
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
	}

	/* Request the common IRQ line */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

	/* If the LPI irq is different from the mac irq
	 * register a dedicated handler
	 */
	if (priv->lpi_irq != dev->irq) {
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto init_error;
		}
	}

	/* Request TX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
				   __func__, (priv->txq[queue_num])->irq_no,
				   ret);
			goto init_error;
		}
	}

	/* Request RX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
				   __func__, (priv->rxq[queue_num])->irq_no,
				   ret);
			goto init_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	sxgbe_mtl_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));

	priv->xstats.tx_threshold = priv->tx_tc;
	priv->xstats.rx_threshold = priv->rx_tc;

	/* Start the ball rolling... */
	netdev_dbg(dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* initialise TX coalesce parameters */
	sxgbe_tx_init_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
	}

	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
	priv->eee_enabled = sxgbe_eee_init(priv);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);
phy_error:
	clk_disable_unprepare(priv->sxgbe_clk);

	return ret;
}
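
/**
 * sxgbe_release - close entry point of the driver
 * @dev: device pointer.
 * Description: this is the stop entry point of the driver.
 */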
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}

/* Prepare first Tx descriptor for doing TSO operation */
static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
			      struct sxgbe_tx_norm_desc *first_desc,
			      struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first descriptor with the TCP/IP header only;
	 * the payload is attached via the following descriptors.
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}
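
/**
 * sxgbe_xmit - Tx entry point of the driver
 * @skb: the socket buffer
 * @dev: device pointer
 * Description: this is the tx entry point of the driver.
 * It programs the ring and supports oversized frames and the SG feature.
 */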
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned int txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	int is_jumbo = 0;
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	if (unlikely(skb_vlan_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (!is_jumbo) {
		if (likely(skb_is_gso(skb))) {
			/* TSO support */
			if (unlikely(tqueue->prev_mss != cur_mss)) {
				priv->hw->desc->tx_ctxt_desc_set_mss(
						ctxt_desc, cur_mss);
				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_reset_ostc(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_ctxt(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_owner(
						ctxt_desc);

				entry = (++tqueue->cur_tx) % tx_rsize;
				first_desc = tqueue->dma_tx + entry;

				tqueue->prev_mss = cur_mss;
			}
			sxgbe_tso_prepare(priv, first_desc, skb);
		} else {
			tx_desc->tdes01 = dma_map_single(priv->device,
							 skb->data, no_pagedlen,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);

			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, cksum_flag);
		}
	}

	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

	/* Set the interrupt-on-completion bit for the current descriptor
	 * only when the coalesce frame threshold has been reached; below
	 * the threshold, defer the interrupt and rely on the timer.
	 */
	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_coal_frames > tqueue->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}

	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	return NETDEV_TX_OK;
}
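
/**
 * sxgbe_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description: this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */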
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		wmb();
		priv->hw->desc->set_rx_owner(p);
		priv->hw->desc->set_rx_int_on_com(p);
		wmb();
	}
}
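
/**
 * sxgbe_rx - receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget.
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */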
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
	u8 qnum = priv->cur_rx_qnum;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	int checksum;
	int status;

	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sk_buff *skb;
		int frame_len;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);

		/* Read the status of the incoming frame and also get
		 * the checksum value based on whether checksum offload
		 * is enabled in the hardware or not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;

		skb = priv->rxq[qnum]->rx_skbuff[entry];

		if (unlikely(!skb))
			netdev_err(priv->dev, "rx descriptor is not consistent\n");

		prefetch(skb->data - NET_IP_ALIGN);
		priv->rxq[qnum]->rx_skbuff[entry] = NULL;

		frame_len = priv->hw->desc->get_rx_frame_len(p);

		skb_put(skb, frame_len);

		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);

		entry = next_entry;
	}

	sxgbe_rx_refill(priv);

	return count;
}
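
/**
 * sxgbe_poll - sxgbe poll method (NAPI)
 * @napi: pointer to the napi structure.
 * @budget: maximum number of packets that the current CPU can receive from
 *	    all interfaces.
 * Description: it looks at the incoming frames and clears the tx resources.
 */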
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
	struct sxgbe_priv_data *priv = container_of(napi,
						    struct sxgbe_priv_data, napi);
	int work_done = 0;
	u8 qnum = priv->cur_rx_qnum;

	priv->xstats.napi_poll++;
	/* first, clean the tx queues */
	sxgbe_tx_all_clean(priv);

	work_done = sxgbe_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
	}

	return work_done;
}

/**
 * sxgbe_tx_timeout
 * @dev: pointer to net device structure
 * @txqueue: index of the hanging queue
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	sxgbe_reset_all_tx_queues(priv);
}

/**
 * sxgbe_common_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It manages the LPI (EEE) entry/exit events reported by the MAC core.
 */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *)dev_id;
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
	int status;

	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
	/* For LPI we need to save the tx status */
	if (status & TX_ENTRY_LPI_MODE) {
		priv->xstats.tx_lpi_entry_n++;
		priv->tx_path_in_lpi_mode = true;
	}
	if (status & TX_EXIT_LPI_MODE) {
		priv->xstats.tx_lpi_exit_n++;
		priv->tx_path_in_lpi_mode = false;
	}
	if (status & RX_ENTRY_LPI_MODE)
		priv->xstats.rx_lpi_entry_n++;
	if (status & RX_EXIT_LPI_MODE)
		priv->xstats.rx_lpi_exit_n++;

	return IRQ_HANDLED;
}

/**
 * sxgbe_tx_interrupt - TX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the per-queue pointer.
 * Description: this is the tx dma interrupt service routine.
 */
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
	struct sxgbe_priv_data *priv = txq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
						  &priv->xstats);
	/* check for normal path */
	if (likely((status & handle_tx)))
		napi_schedule(&priv->napi);

	/* check for unrecoverable error */
	if (unlikely((status & tx_hard_error)))
		sxgbe_restart_tx_queue(priv, txq->queue_no);

	/* check for TC configuration change */
	if (unlikely((status & tx_bump_tc) &&
		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->tx_tc < 512))) {
		/* step of TX TC is 32 till 128, otherwise 64 */
		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
					       txq->queue_no, priv->tx_tc);
		priv->xstats.tx_threshold = priv->tx_tc;
	}

	return IRQ_HANDLED;
}

/**
 * sxgbe_rx_interrupt - RX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the per-queue pointer.
 * Description: this is the rx dma interrupt service routine.
 */
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
	struct sxgbe_priv_data *priv = rxq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
						  &priv->xstats);

	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
		__napi_schedule(&priv->napi);
	}

	/* check for TC configuration change */
	if (unlikely((status & rx_bump_tc) &&
		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->rx_tc < 128))) {
		/* step of RX TC is 32 */
		priv->rx_tc += 32;
		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
					       rxq->queue_no, priv->rx_tc);
		priv->xstats.rx_threshold = priv->rx_tc;
	}

	return IRQ_HANDLED;
}

/* Combine two 32-bit MMC counter registers into one 64-bit value */
static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;

	return val;
}
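
/**
 * sxgbe_get_stats64 - entry point to see statistical information of device
 * @dev: device pointer.
 * @stats: pointer to hold all the statistical information of device.
 * Description:
 * This function is a driver entry point whenever ifconfig command gets
 * executed to see device statistics. Statistics are number of
 * bytes sent or received, errors occurred etc.
 */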
static void sxgbe_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);

	/* Freeze the counter registers before reading their values,
	 * otherwise they may get updated by hardware while we are
	 * reading them.
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXLENERRLO_REG,
						   SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);
}

/**
 * sxgbe_set_features - entry point to set offload features of the device.
 * @dev: device pointer.
 * @features: features which are required to be set.
 * Description:
 * This function is a driver entry point and called by Linux kernel whenever
 * any device features are set or reset by user.
 * Return value:
 * This function returns 0 after setting or resetting device features.
 */
static int sxgbe_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM) {
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = true;
		} else {
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = false;
		}
	}

	return 0;
}
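
/**
 * sxgbe_change_mtu - entry point to change MTU size for the device.
 * @dev: device pointer.
 * @new_mtu: the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */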
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/* The receive ring buffer size depends on the MTU; if the MTU
	 * changes, the receive ring buffers must be reinitialised, so
	 * bring the interface down and back up.
	 */
	sxgbe_release(dev);
	return sxgbe_open(dev);
}

static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	unsigned long data;

	data = (addr[5] << 8) | addr[4];
	/* For MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}
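
/**
 * sxgbe_set_rx_mode - entry point for setting the receive mode of a device
 * @dev: pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever a different receive mode (unicast, multicast, promiscuous) of
 * the device must be programmed into the MAC frame filter.
 */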
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
		/* Switch to promiscuous mode if more perfect-filter slots
		 * are required than available.
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}
#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * sxgbe_poll_controller - entry point for polling receive by device
 * @dev: pointer to the device structure
 * Description:
 * This is used by NETCONSOLE and similar tools to allow network I/O with
 * interrupts disabled. Note: sxgbe_rx_interrupt() expects a per-queue
 * pointer as dev_id, so the first RX queue is passed here rather than the
 * net_device itself.
 */
static void sxgbe_poll_controller(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, priv->rxq[0]);
	enable_irq(priv->irq);
}
#endif

/**
 * sxgbe_ioctl - entry point for the ioctl
 * @dev: device pointer.
 * @rq: an IOCTL-specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports just the phy_do_ioctl(...) MII ioctls.
 */
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phy_do_ioctl(dev, rq, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open		= sxgbe_open,
	.ndo_start_xmit		= sxgbe_xmit,
	.ndo_stop		= sxgbe_release,
	.ndo_get_stats64	= sxgbe_get_stats64,
	.ndo_change_mtu		= sxgbe_change_mtu,
	.ndo_set_features	= sxgbe_set_features,
	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
	.ndo_tx_timeout		= sxgbe_tx_timeout,
	.ndo_eth_ioctl		= sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sxgbe_poll_controller,
#endif
	.ndo_set_mac_address	= eth_mac_addr,
};

/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac		= sxgbe_get_core_ops();
	ops_ptr->desc		= sxgbe_get_desc_ops();
	ops_ptr->dma		= sxgbe_get_dma_ops();
	ops_ptr->mtl		= sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr	= SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data	= SXGBE_MDIO_SCMD_DATA_REG;

	/* Assigning the default link settings: SXGBE defines no default
	 * values for port and duplex in the registers, so assign 0.
	 */
	ops_ptr->link.port	= 0;
	ops_ptr->link.duplex	= 0;
	ops_ptr->link.speed	= SXGBE_SPEED_10G;
}

/**
 * sxgbe_hw_init - init the MAC device
 * @priv: driver private structure
 * Description: this function checks the HW capability
 * (if supported) and sets the driver's features.
 */
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
	u32 ctrl_ids;

	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
		return -ENOMEM;

	/* get the hardware ops */
	sxgbe_get_ops(priv->hw);

	/* get the controller id */
	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");

	return 0;
}

static int sxgbe_sw_reset(void __iomem *addr)
{
	int retry_count = 10;

	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	return 0;
}
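
/**
 * sxgbe_drv_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the private structure.
 */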
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
					struct sxgbe_plat_data *plat_dat,
					void __iomem *addr)
{
	struct sxgbe_priv_data *priv;
	struct net_device *ndev;
	int ret;
	u8 queue_num;

	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	sxgbe_set_ethtool_ops(ndev);
	priv->plat = plat_dat;
	priv->ioaddr = addr;

	ret = sxgbe_sw_reset(priv->ioaddr);
	if (ret)
		goto error_free_netdev;

	/* Verify driver arguments */
	sxgbe_verify_args();

	/* Init MAC and get the capabilities */
	ret = sxgbe_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	/* allocate memory resources for Descriptor rings */
	ret = txring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ret = rxring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ndev->netdev_ops = &sxgbe_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GRO;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);

	/* assign filtering support */
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range */
	ndev->min_mtu = MIN_MTU;
	ndev->max_mtu = MAX_MTU;

	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Enable TCP segmentation offload for all DMA channels */
	if (priv->hw_cap.tcpseg_offload) {
		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
		}
	}

	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Initialise pause frame settings */
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	/* Rx Watchdog is available, enable depending on platform data */
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);

	spin_lock_init(&priv->stats_lock);

	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
		goto error_napi_del;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		sxgbe_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = sxgbe_mdio_register(ndev);
	if (ret < 0) {
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
			   __func__, priv->plat->bus_id);
		goto error_clk_put;
	}

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_mdio_unregister;
	}

	sxgbe_check_ether_addr(priv);

	return priv;

error_mdio_unregister:
	sxgbe_mdio_unregister(ndev);
error_clk_put:
	clk_put(priv->sxgbe_clk);
error_napi_del:
	netif_napi_del(&priv->napi);
error_free_hw:
	kfree(priv->hw);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

/**
 * sxgbe_drv_remove - remove the driver
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	u8 queue_num;

	netdev_info(ndev, "%s: removing driver\n", __func__);

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
	}

	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	unregister_netdev(ndev);

	sxgbe_mdio_unregister(ndev);

	clk_put(priv->sxgbe_clk);

	netif_napi_del(&priv->napi);

	kfree(priv->hw);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */

/* Driver is configured as a platform driver */
static int __init sxgbe_init(void)
{
	int ret;

	ret = sxgbe_register_platform();
	if (ret)
		goto err;
	return 0;
err:
	pr_err("driver registration failed\n");
	return ret;
}

static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(sxgbe_init);
module_exit(sxgbe_exit);

#ifndef MODULE
static int __init sxgbe_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("sxgbeeth=", sxgbe_cmdline_opt);
#endif /* MODULE */

MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");

MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");

MODULE_LICENSE("GPL");