/* Copyright 2014-2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>

#include "../../fsl-mc/include/mc.h"
#include "../../fsl-mc/include/mc-sys.h"
#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
 * that include dpaa2-eth-trace.h must not define it.
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

const char dpaa2_eth_drv_version[] = "0.1";
static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);

		sg_vaddr = phys_to_virt(addr);
		skb_free_frag(sg_vaddr);

		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	skb_free_frag(vaddr);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
					struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	ch->buf_count--;

	return skb;
}

/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);

		sg_vaddr = phys_to_virt(sg_addr);
		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
			if (unlikely(!skb))
				return NULL;

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add
			 * the sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) -
				       page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, DPAA2_ETH_RX_BUF_SIZE);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct napi_struct *napi)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	u32 status = 0;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
	vaddr = phys_to_virt(addr);

	prefetch(vaddr + priv->buf_layout.private_data_size);
	prefetch(vaddr + dpaa2_fd_get_offset(fd));

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		skb = build_linear_skb(priv, ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		struct dpaa2_sg_entry *sgt =
				vaddr + dpaa2_fd_get_offset(fd);

		skb = build_frag_skb(priv, ch, sgt);
		skb_free_frag(vaddr);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		fas = (struct dpaa2_fas *)
				(vaddr + priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	if (priv->net_dev->features & NETIF_F_GRO)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
		fq->stats.frames++;

		fq->consume(priv, ch, fd, &ch->napi);
		cleaned++;
	} while (!is_last);

	return cleaned;
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	void *hwa;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
	sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);

	/* PTA from egress side is passed as is to the confirmation side so
	 * we need to clear some fields here in order to find consistent values
	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
	 * field from the hardware annotation area
	 */
	hwa = sgt_buf + priv->buf_layout.private_data_size;
	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf was zeroed out by kzalloc(), so the fields of any unused
	 * entries are already 0; we only need to fill in the used entries
	 * and mark the last one as final.
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->skb = skb;
	swa->scl = scl;
	swa->num_sg = num_sg;
	swa->num_dma_bufs = num_dma_bufs;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
			  DPAA2_FD_CTRL_PTV1);

	return 0;

dma_map_single_failed:
	kfree(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a frame descriptor based on a linear skb */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start;
	void *hwa;
	struct sk_buff **skbh;
	dma_addr_t addr;

	buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
				 DPAA2_ETH_TX_BUF_ALIGN,
				 DPAA2_ETH_TX_BUF_ALIGN);

	/* PTA from egress side is passed as is to the confirmation side so
	 * we need to clear some fields here in order to find consistent values
	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
	 * field from the hardware annotation area
	 */
	hwa = buffer_start + priv->buf_layout.private_data_size;
	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buffer_start;
	*skbh = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
			  DPAA2_FD_CTRL_PTV1);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 * Optionally, return the frame annotation status word (FAS), which needs
 * to be checked if we're on the confirmation path.
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       u32 *status)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff **skbh, *skb;
	unsigned char *buffer_start;
	int unmap_size;
	struct scatterlist *scl;
	int num_sg, num_dma_bufs;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_fas *fas;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = phys_to_virt(fd_addr);

	if (fd_format == dpaa2_fd_single) {
		skb = *skbh;
		buffer_start = (unsigned char *)skbh;
		/* Accessing the skb buffer is safe before dma unmap, because
		 * we didn't map the actual skb shell.
		 */
		dma_unmap_single(dev, fd_addr,
				 skb_tail_pointer(skb) - buffer_start,
				 DMA_TO_DEVICE);
	} else if (fd_format == dpaa2_fd_sg) {
		swa = (struct dpaa2_eth_swa *)skbh;
		skb = swa->skb;
		scl = swa->scl;
		num_sg = swa->num_sg;
		num_dma_bufs = swa->num_dma_bufs;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
		kfree(scl);

		/* Unmap the SGT buffer */
		unmap_size = priv->tx_data_offset +
			     sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
		dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
	} else {
		/* Unsupported format, mark it as errored and give up */
		if (status)
			*status = ~0;
		return;
	}

	/* Read the status from the Frame Annotation after we unmap the first
	 * buffer but before we free it. The caller function is responsible
	 * for checking the status value.
	 */
	if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		fas = (struct dpaa2_fas *)
			((void *)skbh + priv->buf_layout.private_data_size);
		*status = le32_to_cpu(fas->status);
	}

	/* Free SGT buffer kmalloc'ed on tx */
	if (fd_format != dpaa2_fd_single)
		kfree(skbh);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	u16 queue_mapping;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection primarily based on cpu affinity; this is
	 * non-migratable context, so it's safe to call smp_processor_id().
	 */
	queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
	fq = &priv->fq[queue_mapping];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
						  fq->tx_qdbin, &fd);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, &fd, NULL);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += skb->len;
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct napi_struct *napi __always_unused)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 status = 0;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);

	free_tx_fd(priv, fd, &status);

	if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
		percpu_stats = this_cpu_ptr(priv->percpu_stats);
		/* Tx errors are counted here, not on the hot enqueue path */
		percpu_stats->tx_errors++;
	}
}

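/* Enable or disable hardware checksum validation on the Rx path; the L3 and
 * L4 offloads are configured together.
 */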
static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	void *buf;
	dma_addr_t addr;
	int i;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding
		 */
		buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
		if (unlikely(!buf))
			goto err_alloc;

		buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);

		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 buf, DPAA2_ETH_BUF_RAW_SIZE,
					 addr, DPAA2_ETH_RX_BUF_SIZE,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * The buffer release function would only fail if the QBMan portal
	 * was busy, which implies portal contention (i.e. more CPUs than
	 * portals, i.e. GPPs w/o affine DPIOs). The portal will eventually
	 * become available again, so we just spin on cpu_relax() instead of
	 * returning an error.
	 */
	while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
		cpu_relax();
	return i;

err_map:
	skb_free_frag(buf);
err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	/* This is the lazy seeding of Rx buffer pools.
	 * add_bufs() is also used on the Rx hotpath and calls
	 * napi_alloc_frag(), which in turn ends up calling this_cpu_ptr(),
	 * and that mandates execution in atomic context.
	 * Rather than splitting up the code, do a one-off preempt disable.
	 */
	preempt_disable();
	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = add_bufs(priv, bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
				preempt_enable();
				return -ENOMEM;
			}
		}
	}
	preempt_enable();

	return 0;
}

/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	void *vaddr;
	int ret, i;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
					       buf_array, count);
		if (ret < 0) {
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			/* Same logic as on regular Rx path */
			dma_unmap_single(dev, buf_array[i],
					 DPAA2_ETH_RX_BUF_SIZE,
					 DMA_FROM_DEVICE);
			vaddr = phys_to_virt(buf_array[i]);
			skb_free_frag(vaddr);
		}
	} while (ret);
}

static void drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int refill_pool(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_channel *ch,
		       u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = add_bufs(priv, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

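/* Issue a volatile dequeue command, pulling a batch of frames from the QMan
 * channel into ch->store; retry while the software portal is busy.
 */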
static int pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	int cleaned = 0, store_cleaned;
	struct dpaa2_eth_priv *priv;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	priv = ch->priv;

	while (cleaned < budget) {
		err = pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		refill_pool(priv, ch, priv->dpbp_attrs.bpid);

		store_cleaned = consume_frames(ch);
		cleaned += store_cleaned;

		/* If we have enough budget left for a full store,
		 * try a new pull dequeue, otherwise we're done here
		 */
		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
			break;
	}

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		/* Re-enable data available notifications */
		do {
			err = dpaa2_io_service_rearm(NULL, &ch->nctx);
			cpu_relax();
		} while (err == -EBUSY);
	}

	ch->stats.frames += cleaned;

	return cleaned;
}

static void enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

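/* Fetch the current link state from the DPNI and propagate any up/down
 * transition to the net device (carrier and Tx queues).
 */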
static int link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		return 0;

	priv->link_state = state;
	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = seed_pool(priv, priv->dpbp_attrs.bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
	}

	/* We'll only start the txqs when the link is actually ready; make sure
	 * we don't race against the link up notification, which may come
	 * immediately after dpni_enable();
	 */
	netif_tx_stop_all_queues(net_dev);
	enable_ch_napi(priv);
	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
	 * return true and cause 'ip link show' to report the LOWER_UP flag,
	 * even though the link notification wasn't even received.
	 */
	netif_carrier_off(net_dev);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	/* If the DPMAC object has already processed the link up interrupt,
	 * we have to learn the link state ourselves.
	 */
	err = link_state_update(priv);
	if (err < 0) {
		netdev_err(net_dev, "Can't update link state\n");
		goto link_state_err;
	}

	return 0;

link_state_err:
enable_err:
	disable_ch_napi(priv);
	drain_pool(priv);
	return err;
}

/* The DPIO store must be empty when we call this,
 * at the end of every NAPI cycle.
 */
static u32 drain_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch)
{
	u32 drained = 0, total = 0;

	do {
		pull_channel(ch);
		drained = consume_frames(ch);
		total += drained;
	} while (drained);

	return total;
}

static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;
	u32 drained = 0;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		drained += drain_channel(priv, ch);
	}

	return drained;
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled;
	int retries = 10;
	u32 drained;

	netif_tx_stop_all_queues(net_dev);
	netif_carrier_off(net_dev);

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	/* Wait for NAPI to complete on every core and disable it.
	 * In particular, this will also prevent NAPI from being rescheduled if
	 * a new CDAN is serviced, effectively discarding the CDAN. We
	 * therefore don't need to disable CDANs.
	 */
	disable_ch_napi(priv);

	/* Empty the Rx FQs */
	drained = drain_ingress_frames(priv);
	if (drained)
		netdev_dbg(net_dev, "Drained %d frames.\n", drained);

	/* Empty the buffer pool */
	drain_pool(priv);

	return 0;
}

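/* .ndo_init callback: advertise the private flags and the offload feature
 * set supported by this interface.
 */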
static int dpaa2_eth_init(struct net_device *net_dev)
{
	u64 supported = 0;
	u64 not_supported = 0;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
void dpaa2_eth_get_stats(struct net_device *net_dev,
			 struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	/* Set the maximum Rx frame length to match the transmit side;
	 * account for L2 headers when computing the MFL
	 */
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					(u16)DPAA2_ETH_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	net_dev->mtu = mtu;
	return 0;
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_uc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_mc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

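/* Reconcile the hardware MAC filtering tables and promiscuous settings with
 * the net device's flags and address lists. Filter reconstruction is done
 * with promiscuous mode temporarily on, to avoid dropping legitimate frames.
 */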
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc
		 * promisc nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	add_mc_hw_addr(net_dev, priv);
	add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_init = dpaa2_eth_init,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
};

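/* Channel Data Availability Notification (CDAN) callback: account the event
 * and schedule NAPI processing on the affected channel.
 */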
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule_irqoff(&ch->napi);
}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpcon_attr attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return NULL;
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto err_open;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto err_reset;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto err_enable;
	}

	return dpcon;

err_enable:
err_get_attr:
err_reset:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
err_open:
	fsl_mc_object_free(dpcon);

	return NULL;
}

static void free_dpcon(struct dpaa2_eth_priv *priv,
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (!channel->dpcon)
		goto err_setup;

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return NULL;
}

static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (!channel) {
			dev_info(dev,
				 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new channel context with the core's affine
		 * DPIO
		 */
		err = dpaa2_io_service_register(NULL, nctx);
		if (err) {
			dev_info(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either.
			 */
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == dpaa2_eth_queue_count(priv))
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(NULL, nctx);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

static void free_dpio(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(NULL, &ch->nctx);
		free_channel(priv, ch);
	}
}

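/* Return the channel registered for a given CPU */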
static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here: FQ target CPUs are picked from
	 * priv->dpio_cpumask, for which we do have channels. Warn and
	 * fall back to the first channel.
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}
}

static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;

	/* We have one Tx confirmation FQ and one Rx FQ per flow; the
	 * number of Tx and Rx flows is the same.
	 * Tx confirmation FQs come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &priv->dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}

static void free_dpbp(struct dpaa2_eth_priv *priv)
{
	drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	priv->dpni_id = ls_dev->obj_desc.id;

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		goto err_open;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto err_reset;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure buffer layouts */
	/* rx buffer */
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
				   DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &priv->buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		goto err_buf_layout;
	}

	/* tx buffer */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &priv->buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		goto err_buf_layout;
	}

	/* tx-confirm buffer */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* Accommodate software annotation space (SWA) */
	priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
err_reset:
	dpni_close(priv->mc_io, 0, priv->mc_token);
err_open:
	return err;
}

static void free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}

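/* Set up a Rx frame queue: look up its FQID, have it deliver frames to our
 * DPCON channel with the FQ structure as user context, and enable taildrop.
 */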
static int setup_rx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	struct dpni_taildrop td;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	td.enable = 1;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
	err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
				DPNI_QUEUE_RX, 0, fq->flowid, &td);
	if (err) {
		dev_err(dev, "dpni_set_taildrop() failed\n");
		return err;
	}

	return 0;
}

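/* Set up a Tx flow: record the queuing destination bin (qdbin) used on
 * enqueue and point the matching Tx confirmation queue at our channel.
 */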
static int setup_tx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX) failed\n");
		return err;
	}

	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

/* Header fields supported in the Rx hash distribution key */
static const struct dpaa2_eth_hash_fields hash_fields[] = {
	{
		/* L3 header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.size = 2,
	},
};

/* Set RX hash options
 * flags is a combination of RXH_ bits
 */
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	u8 *dma_mem;
	int i;
	int err = 0;

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_err(dev, "Hashing support is not enabled\n");
		return -EOPNOTSUPP;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & hash_fields[i].rxnfc_field))
			continue;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = hash_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = hash_fields[i].cls_field;
		cls_cfg.num_extracts++;

		priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg() error %d\n", err);
		goto err_prep_key;
	}

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	/* Prepare for setting the rx dist */
	dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
					       DPAA2_CLASSIFIER_DMA_SIZE,
					       DMA_TO_DEVICE);
	if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
			 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);

err_dma_map:
err_prep_key:
	kfree(dma_mem);
	return err;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* Have the interface implicitly distribute traffic based on supported
	 * header fields
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
	if (err)
		netdev_err(net_dev, "Failed to configure hashing\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior() failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

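/* Final netdevice configuration: MAC address selection, required headroom,
 * MTU limits, and registration with the network stack.
 */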
static int netdev_init(struct net_device *net_dev)
{
	int err;
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	u8 bcast_addr[ETH_ALEN];

	net_dev->netdev_ops = &dpaa2_eth_ops;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get the MAC address currently configured on the DPNI */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* Fills in net_dev->dev_addr, as required by
		 * register_netdevice()
		 */
		eth_hw_addr_random(net_dev);
		/* Make the user aware, without cluttering the boot log */
		dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
			return err;
		}
		/* Override NET_ADDR_RANDOM, set by eth_hw_addr_random(); for
		 * all practical purposes, this will be our "permanent" mac
		 * address, at least until the next reboot. This move will
		 * also permit register_netdevice() to properly fill up
		 * net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	/* Explicitly add the broadcast address to the MAC filtering table;
	 * the MC won't do that for us.
	 */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
		/* Won't return an error; at least, we'd have egress traffic */
	}

	/* Reserve enough space to align buffer as per hardware requirement;
	 * NOTE: priv->tx_data_offset MUST be initialized at this point.
	 */
	net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);

	/* Set MTU limits */
	net_dev->min_mtu = 68;
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;

	/* Our .ndo_init will be called herein */
	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		return err;
	}

	return 0;
}

static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

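/* Hardirq handler for the DPNI interrupt: all actual processing is deferred
 * to the threaded handler below.
 */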
static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status, clear = 0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
		link_state_update(netdev_priv(net_dev));
	}

out:
	dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
			      DPNI_IRQ_INDEX, clear);
	return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					dpni_irq0_handler,
					dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

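/* Register one NAPI instance per channel */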
static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

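/* Probe one DPNI device: allocate the net device, set up the MC objects it
 * depends on (DPNI, DPIO channels, DPBP), allocate per-cpu statistics and
 * register the interface, falling back to link-state polling if interrupt
 * setup fails.
 */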
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			netdev_err(net_dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
	unregister_netdev(net_dev);
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);
	dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

module_fsl_mc_driver(dpaa2_eth_driver);