// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017 NXP
 */
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/platform_device.h>
8#include <linux/etherdevice.h>
9#include <linux/of_net.h>
10#include <linux/interrupt.h>
11#include <linux/msi.h>
12#include <linux/kthread.h>
13#include <linux/iommu.h>
14#include <linux/net_tstamp.h>
15#include <linux/fsl/mc.h>
16
17#include <net/sock.h>
18
19#include "dpaa2-eth.h"
20
21
22
23
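/* CREATE_TRACE_POINTS must be defined in exactly one compilation unit;
 * everyone else only includes the trace header.
 */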
24#define CREATE_TRACE_POINTS
25#include "dpaa2-eth-trace.h"
26
27MODULE_LICENSE("Dual BSD/GPL");
28MODULE_AUTHOR("Freescale Semiconductor, Inc");
29MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
30
31static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
32 dma_addr_t iova_addr)
33{
34 phys_addr_t phys_addr;
35
36 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
37
38 return phys_to_virt(phys_addr);
39}
40
41static void validate_rx_csum(struct dpaa2_eth_priv *priv,
42 u32 fd_status,
43 struct sk_buff *skb)
44{
45 skb_checksum_none_assert(skb);
46
47
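/* HW checksum validation is disabled; nothing to do here */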
48 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
49 return;
50
51
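/* Read checksum validation bits from the frame annotation status */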
52 if (!((fd_status & DPAA2_FAS_L3CV) &&
53 (fd_status & DPAA2_FAS_L4CV)))
54 return;
55
56
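/* Inform the stack there's no need to recompute the L3/L4 checksums */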
57 skb->ip_summed = CHECKSUM_UNNECESSARY;
58}
59
60
61
62
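/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */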
63static void free_rx_fd(struct dpaa2_eth_priv *priv,
64 const struct dpaa2_fd *fd,
65 void *vaddr)
66{
67 struct device *dev = priv->net_dev->dev.parent;
68 dma_addr_t addr = dpaa2_fd_get_addr(fd);
69 u8 fd_format = dpaa2_fd_get_format(fd);
70 struct dpaa2_sg_entry *sgt;
71 void *sg_vaddr;
72 int i;
73
74
75 if (fd_format == dpaa2_fd_single)
76 goto free_buf;
77 else if (fd_format != dpaa2_fd_sg)
78
79 return;
80
81
82
83
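/* For S/G frames, we first need to free all SG entries
 * except the first one, which was taken care of already
 */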
84 sgt = vaddr + dpaa2_fd_get_offset(fd);
85 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
86 addr = dpaa2_sg_get_addr(&sgt[i]);
87 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
88 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
89 DMA_FROM_DEVICE);
90
91 skb_free_frag(sg_vaddr);
92 if (dpaa2_sg_is_final(&sgt[i]))
93 break;
94 }
95
96free_buf:
97 skb_free_frag(vaddr);
98}
99
100
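/* Build a linear skb based on a single-buffer frame descriptor */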
101static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
102 struct dpaa2_eth_channel *ch,
103 const struct dpaa2_fd *fd,
104 void *fd_vaddr)
105{
106 struct sk_buff *skb = NULL;
107 u16 fd_offset = dpaa2_fd_get_offset(fd);
108 u32 fd_length = dpaa2_fd_get_len(fd);
109
110 ch->buf_count--;
111
112 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
113 if (unlikely(!skb))
114 return NULL;
115
116 skb_reserve(skb, fd_offset);
117 skb_put(skb, fd_length);
118
119 return skb;
120}
121
122
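/* Build a non linear (fragmented) skb based on a S/G table found in the
 * frame descriptor. The page fragment holding the S/G table itself is
 * freed by the caller.
 */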
123static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 struct dpaa2_eth_channel *ch,
125 struct dpaa2_sg_entry *sgt)
126{
127 struct sk_buff *skb = NULL;
128 struct device *dev = priv->net_dev->dev.parent;
129 void *sg_vaddr;
130 dma_addr_t sg_addr;
131 u16 sg_offset;
132 u32 sg_length;
133 struct page *page, *head_page;
134 int page_offset;
135 int i;
136
137 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 struct dpaa2_sg_entry *sge = &sgt[i];
139
140
141
142
143
144
145 sg_addr = dpaa2_sg_get_addr(sge);
146 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 DMA_FROM_DEVICE);
149
150 sg_length = dpaa2_sg_get_len(sge);
151
152 if (i == 0) {
153
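/* We build the skb around the first data buffer */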
154 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
155 if (unlikely(!skb)) {
156
157
158
159 skb_free_frag(sg_vaddr);
160
161
162
163
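/* We still need to subtract the buffers used
 * by this FD from our software counter
 */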
164 while (!dpaa2_sg_is_final(&sgt[i]) &&
165 i < DPAA2_ETH_MAX_SG_ENTRIES)
166 i++;
167 break;
168 }
169
170 sg_offset = dpaa2_sg_get_offset(sge);
171 skb_reserve(skb, sg_offset);
172 skb_put(skb, sg_length);
173 } else {
174
175 page = virt_to_page(sg_vaddr);
176 head_page = virt_to_head_page(sg_vaddr);
177
178
179
180
181
182
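/* Data offset inside the (possibly compound) page;
 * frame data may start anywhere in the buffer
 */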
183 page_offset = ((unsigned long)sg_vaddr &
184 (PAGE_SIZE - 1)) +
185 (page_address(page) - page_address(head_page));
186
187 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 }
190
191 if (dpaa2_sg_is_final(sge))
192 break;
193 }
194
195 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196
197
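/* Count all data buffers + SG table buffer */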
198 ch->buf_count -= i + 2;
199
200 return skb;
201}
202
203
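/* Main Rx frame processing routine */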
204static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
205 struct dpaa2_eth_channel *ch,
206 const struct dpaa2_fd *fd,
207 struct napi_struct *napi,
208 u16 queue_id)
209{
210 dma_addr_t addr = dpaa2_fd_get_addr(fd);
211 u8 fd_format = dpaa2_fd_get_format(fd);
212 void *vaddr;
213 struct sk_buff *skb;
214 struct rtnl_link_stats64 *percpu_stats;
215 struct dpaa2_eth_drv_stats *percpu_extras;
216 struct device *dev = priv->net_dev->dev.parent;
217 struct dpaa2_fas *fas;
218 void *buf_data;
219 u32 status = 0;
220
221
222 trace_dpaa2_rx_fd(priv->net_dev, fd);
223
224 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
225 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
226
227 fas = dpaa2_get_fas(vaddr, false);
228 prefetch(fas);
229 buf_data = vaddr + dpaa2_fd_get_offset(fd);
230 prefetch(buf_data);
231
232 percpu_stats = this_cpu_ptr(priv->percpu_stats);
233 percpu_extras = this_cpu_ptr(priv->percpu_extras);
234
235 if (fd_format == dpaa2_fd_single) {
236 skb = build_linear_skb(priv, ch, fd, vaddr);
237 } else if (fd_format == dpaa2_fd_sg) {
238 skb = build_frag_skb(priv, ch, buf_data);
239 skb_free_frag(vaddr);
240 percpu_extras->rx_sg_frames++;
241 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
242 } else {
243
244 goto err_frame_format;
245 }
246
247 if (unlikely(!skb))
248 goto err_build_skb;
249
250 prefetch(skb->data);
251
252
253 if (priv->rx_tstamp) {
254 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
255 __le64 *ts = dpaa2_get_ts(vaddr, false);
256 u64 ns;
257
258 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
259
260 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
261 shhwtstamps->hwtstamp = ns_to_ktime(ns);
262 }
263
264
265 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
266 status = le32_to_cpu(fas->status);
267 validate_rx_csum(priv, status, skb);
268 }
269
270 skb->protocol = eth_type_trans(skb, priv->net_dev);
271 skb_record_rx_queue(skb, queue_id);
272
273 percpu_stats->rx_packets++;
274 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275
276 napi_gro_receive(napi, skb);
277
278 return;
279
280err_build_skb:
281 free_rx_fd(priv, fd, vaddr);
282err_frame_format:
283 percpu_stats->rx_dropped++;
284}
285
286
287
288
289
290
291
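/* Consume all frames pull-dequeued into the store. This is the simplest
 * way to make sure we don't accidentally issue another volatile dequeue
 * which would overwrite the store before we had a chance to drain it.
 */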
292static int consume_frames(struct dpaa2_eth_channel *ch)
293{
294 struct dpaa2_eth_priv *priv = ch->priv;
295 struct dpaa2_eth_fq *fq;
296 struct dpaa2_dq *dq;
297 const struct dpaa2_fd *fd;
298 int cleaned = 0;
299 int is_last;
300
301 do {
302 dq = dpaa2_io_store_next(ch->store, &is_last);
303 if (unlikely(!dq)) {
304
305
306
307
308
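/* If we're here, we *must* have placed a
 * volatile dequeue command, so keep reading through
 * the store until we get some sort of valid response
 * token (either a valid frame or an "empty dequeue")
 */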
309 continue;
310 }
311
312 fd = dpaa2_dq_fd(dq);
313 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
314 fq->stats.frames++;
315
316 fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
317 cleaned++;
318 } while (!is_last);
319
320 return cleaned;
321}
322
323
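/* Configure the egress frame annotation for timestamp update */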
324static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
325{
326 struct dpaa2_faead *faead;
327 u32 ctrl, frc;
328
329
330 frc = dpaa2_fd_get_frc(fd);
331 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
332
333
334 ctrl = dpaa2_fd_get_ctrl(fd);
335 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
336
337
338
339
340 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
341 faead = dpaa2_get_faead(buf_start, true);
342 faead->ctrl = cpu_to_le32(ctrl);
343}
344
345
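/* Create a frame descriptor based on a scatterlist (nonlinear) skb */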
346static int build_sg_fd(struct dpaa2_eth_priv *priv,
347 struct sk_buff *skb,
348 struct dpaa2_fd *fd)
349{
350 struct device *dev = priv->net_dev->dev.parent;
351 void *sgt_buf = NULL;
352 dma_addr_t addr;
353 int nr_frags = skb_shinfo(skb)->nr_frags;
354 struct dpaa2_sg_entry *sgt;
355 int i, err;
356 int sgt_buf_size;
357 struct scatterlist *scl, *crt_scl;
358 int num_sg;
359 int num_dma_bufs;
360 struct dpaa2_eth_swa *swa;
361
362
363
364
365
366
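/* Create and map scatterlist.
 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
 * to go beyond nr_frags+1.
 * Note: We don't support chained scatterlists
 */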
367 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
368 return -EINVAL;
369
370 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
371 if (unlikely(!scl))
372 return -ENOMEM;
373
374 sg_init_table(scl, nr_frags + 1);
375 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
376 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
377 if (unlikely(!num_dma_bufs)) {
378 err = -ENOMEM;
379 goto dma_map_sg_failed;
380 }
381
382
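/* Prepare the HW SGT structure */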
383 sgt_buf_size = priv->tx_data_offset +
384 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
385 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
386 if (unlikely(!sgt_buf)) {
387 err = -ENOMEM;
388 goto sgt_buf_alloc_failed;
389 }
390 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
391 memset(sgt_buf, 0, sgt_buf_size);
392
393 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
394
395
396
397
398
399
400
401
402 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
403 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
404 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
405 }
406 dpaa2_sg_set_final(&sgt[i - 1], true);
407
408
409
410
411
412
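/* Store the skb backpointer in the SGT buffer.
 * Fit the scatterlist and the number of buffers alongside the
 * skb backpointer in the software annotation area; we'll need
 * all of them on Tx confirmation.
 */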
413 swa = (struct dpaa2_eth_swa *)sgt_buf;
414 swa->skb = skb;
415 swa->scl = scl;
416 swa->num_sg = num_sg;
417 swa->sgt_size = sgt_buf_size;
418
419
420 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
421 if (unlikely(dma_mapping_error(dev, addr))) {
422 err = -ENOMEM;
423 goto dma_map_single_failed;
424 }
425 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
426 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
427 dpaa2_fd_set_addr(fd, addr);
428 dpaa2_fd_set_len(fd, skb->len);
429 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
430
431 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
432 enable_tx_tstamp(fd, sgt_buf);
433
434 return 0;
435
436dma_map_single_failed:
437 skb_free_frag(sgt_buf);
438sgt_buf_alloc_failed:
439 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
440dma_map_sg_failed:
441 kfree(scl);
442 return err;
443}
444
445
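/* Create a frame descriptor based on a linear skb */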
446static int build_single_fd(struct dpaa2_eth_priv *priv,
447 struct sk_buff *skb,
448 struct dpaa2_fd *fd)
449{
450 struct device *dev = priv->net_dev->dev.parent;
451 u8 *buffer_start, *aligned_start;
452 struct sk_buff **skbh;
453 dma_addr_t addr;
454
455 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
456
457
458
459
460 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
461 DPAA2_ETH_TX_BUF_ALIGN);
462 if (aligned_start >= skb->head)
463 buffer_start = aligned_start;
464
465
466
467
468
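/* Store a backpointer to the skb at the beginning of the buffer
 * (in the private data area) so we can release it on Tx confirmation
 */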
469 skbh = (struct sk_buff **)buffer_start;
470 *skbh = skb;
471
472 addr = dma_map_single(dev, buffer_start,
473 skb_tail_pointer(skb) - buffer_start,
474 DMA_BIDIRECTIONAL);
475 if (unlikely(dma_mapping_error(dev, addr)))
476 return -ENOMEM;
477
478 dpaa2_fd_set_addr(fd, addr);
479 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
480 dpaa2_fd_set_len(fd, skb->len);
481 dpaa2_fd_set_format(fd, dpaa2_fd_single);
482 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
483
484 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
485 enable_tx_tstamp(fd, buffer_start);
486
487 return 0;
488}
489
490
491
492
493
494
495
496
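/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free the FD and possibly the SGT buffer allocated on Tx.
 * The skb back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path
 * of dpaa2_eth_tx().
 */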
497static void free_tx_fd(const struct dpaa2_eth_priv *priv,
498 const struct dpaa2_fd *fd)
499{
500 struct device *dev = priv->net_dev->dev.parent;
501 dma_addr_t fd_addr;
502 struct sk_buff **skbh, *skb;
503 unsigned char *buffer_start;
504 struct dpaa2_eth_swa *swa;
505 u8 fd_format = dpaa2_fd_get_format(fd);
506
507 fd_addr = dpaa2_fd_get_addr(fd);
508 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
509
510 if (fd_format == dpaa2_fd_single) {
511 skb = *skbh;
512 buffer_start = (unsigned char *)skbh;
513
514
515
516 dma_unmap_single(dev, fd_addr,
517 skb_tail_pointer(skb) - buffer_start,
518 DMA_BIDIRECTIONAL);
519 } else if (fd_format == dpaa2_fd_sg) {
520 swa = (struct dpaa2_eth_swa *)skbh;
521 skb = swa->skb;
522
523
524 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
525 kfree(swa->scl);
526
527
528 dma_unmap_single(dev, fd_addr, swa->sgt_size,
529 DMA_BIDIRECTIONAL);
530 } else {
531 netdev_dbg(priv->net_dev, "Invalid FD format\n");
532 return;
533 }
534
535
536 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
537 struct skb_shared_hwtstamps shhwtstamps;
538 __le64 *ts = dpaa2_get_ts(skbh, true);
539 u64 ns;
540
541 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
542
543 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
544 shhwtstamps.hwtstamp = ns_to_ktime(ns);
545 skb_tstamp_tx(skb, &shhwtstamps);
546 }
547
548
549 if (fd_format != dpaa2_fd_single)
550 skb_free_frag(skbh);
551
552
553 dev_kfree_skb(skb);
554}
555
556static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
557{
558 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
559 struct dpaa2_fd fd;
560 struct rtnl_link_stats64 *percpu_stats;
561 struct dpaa2_eth_drv_stats *percpu_extras;
562 struct dpaa2_eth_fq *fq;
563 u16 queue_mapping;
564 unsigned int needed_headroom;
565 int err, i;
566
567 percpu_stats = this_cpu_ptr(priv->percpu_stats);
568 percpu_extras = this_cpu_ptr(priv->percpu_extras);
569
570 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
571 if (skb_headroom(skb) < needed_headroom) {
572 struct sk_buff *ns;
573
574 ns = skb_realloc_headroom(skb, needed_headroom);
575 if (unlikely(!ns)) {
576 percpu_stats->tx_dropped++;
577 goto err_alloc_headroom;
578 }
579 percpu_extras->tx_reallocs++;
580
581 if (skb->sk)
582 skb_set_owner_w(ns, skb->sk);
583
584 dev_kfree_skb(skb);
585 skb = ns;
586 }
587
588
589
590
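/* We'll store a backpointer to the skb until Tx confirmation, so the skb
 * must not be shared with anyone else; get a private copy if needed.
 */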
591 skb = skb_unshare(skb, GFP_ATOMIC);
592 if (unlikely(!skb)) {
593
594 percpu_stats->tx_dropped++;
595 return NETDEV_TX_OK;
596 }
597
598
599 memset(&fd, 0, sizeof(fd));
600
601 if (skb_is_nonlinear(skb)) {
602 err = build_sg_fd(priv, skb, &fd);
603 percpu_extras->tx_sg_frames++;
604 percpu_extras->tx_sg_bytes += skb->len;
605 } else {
606 err = build_single_fd(priv, skb, &fd);
607 }
608
609 if (unlikely(err)) {
610 percpu_stats->tx_dropped++;
611 goto err_build_fd;
612 }
613
614
615 trace_dpaa2_tx_fd(net_dev, &fd);
616
617
618
619
620
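/* Select the Tx queue (and its paired Tx confirmation queue) based on the
 * queue mapping already chosen by the stack.
 */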
621 queue_mapping = skb_get_queue_mapping(skb);
622 fq = &priv->fq[queue_mapping];
623 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
624 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
625 priv->tx_qdid, 0,
626 fq->tx_qdbin, &fd);
627 if (err != -EBUSY)
628 break;
629 }
630 percpu_extras->tx_portal_busy += i;
631 if (unlikely(err < 0)) {
632 percpu_stats->tx_errors++;
633
634 free_tx_fd(priv, &fd);
635 } else {
636 percpu_stats->tx_packets++;
637 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
638 }
639
640 return NETDEV_TX_OK;
641
642err_build_fd:
643err_alloc_headroom:
644 dev_kfree_skb(skb);
645
646 return NETDEV_TX_OK;
647}
648
649
650static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
651 struct dpaa2_eth_channel *ch,
652 const struct dpaa2_fd *fd,
653 struct napi_struct *napi __always_unused,
654 u16 queue_id __always_unused)
655{
656 struct rtnl_link_stats64 *percpu_stats;
657 struct dpaa2_eth_drv_stats *percpu_extras;
658 u32 fd_errors;
659
660
661 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
662
663 percpu_extras = this_cpu_ptr(priv->percpu_extras);
664 percpu_extras->tx_conf_frames++;
665 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
666
667
668 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
669 free_tx_fd(priv, fd);
670
671 if (likely(!fd_errors))
672 return;
673
674 if (net_ratelimit())
675 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
676 fd_errors);
677
678 percpu_stats = this_cpu_ptr(priv->percpu_stats);
679
680 percpu_stats->tx_errors++;
681}
682
683static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
684{
685 int err;
686
687 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
688 DPNI_OFF_RX_L3_CSUM, enable);
689 if (err) {
690 netdev_err(priv->net_dev,
691 "dpni_set_offload(RX_L3_CSUM) failed\n");
692 return err;
693 }
694
695 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
696 DPNI_OFF_RX_L4_CSUM, enable);
697 if (err) {
698 netdev_err(priv->net_dev,
699 "dpni_set_offload(RX_L4_CSUM) failed\n");
700 return err;
701 }
702
703 return 0;
704}
705
706static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
707{
708 int err;
709
710 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
711 DPNI_OFF_TX_L3_CSUM, enable);
712 if (err) {
713 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
714 return err;
715 }
716
717 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
718 DPNI_OFF_TX_L4_CSUM, enable);
719 if (err) {
720 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
721 return err;
722 }
723
724 return 0;
725}
726
727
728
729
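/* Free buffers acquired from the buffer pool or which were meant to be
 * released into it
 */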
730static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
731{
732 struct device *dev = priv->net_dev->dev.parent;
733 void *vaddr;
734 int i;
735
736 for (i = 0; i < count; i++) {
737 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
738 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
739 DMA_FROM_DEVICE);
740 skb_free_frag(vaddr);
741 }
742}
743
744
745
746
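/* Perform a single release command to add buffers
 * to the specified buffer pool
 */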
747static int add_bufs(struct dpaa2_eth_priv *priv,
748 struct dpaa2_eth_channel *ch, u16 bpid)
749{
750 struct device *dev = priv->net_dev->dev.parent;
751 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
752 void *buf;
753 dma_addr_t addr;
754 int i, err;
755
756 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
757
758
759
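/* Allocate buffer visible to WRIOP + skb shared info +
 * alignment padding
 */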
760 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
761 if (unlikely(!buf))
762 goto err_alloc;
763
764 buf = PTR_ALIGN(buf, priv->rx_buf_align);
765
766 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
767 DMA_FROM_DEVICE);
768 if (unlikely(dma_mapping_error(dev, addr)))
769 goto err_map;
770
771 buf_array[i] = addr;
772
773
774 trace_dpaa2_eth_buf_seed(priv->net_dev,
775 buf, dpaa2_eth_buf_raw_size(priv),
776 addr, DPAA2_ETH_RX_BUF_SIZE,
777 bpid);
778 }
779
780release_bufs:
781
782 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
783 buf_array, i)) == -EBUSY)
784 cpu_relax();
785
786
787
788
789 if (err) {
790 free_bufs(priv, buf_array, i);
791 return 0;
792 }
793
794 return i;
795
796err_map:
797 skb_free_frag(buf);
798err_alloc:
799
800
801
802 if (i)
803 goto release_bufs;
804
805 return 0;
806}
807
808static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
809{
810 int i, j;
811 int new_count;
812
813
814
815
816
817
818
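/* add_bufs() is also used on the Rx hotpath and calls napi_alloc_frag(),
 * which in turn uses this_cpu_ptr() and therefore expects to run in
 * atomic context. Rather than splitting up the code, do a one-off
 * preempt disable here.
 */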
819 preempt_disable();
820 for (j = 0; j < priv->num_channels; j++) {
821 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
822 i += DPAA2_ETH_BUFS_PER_CMD) {
823 new_count = add_bufs(priv, priv->channel[j], bpid);
824 priv->channel[j]->buf_count += new_count;
825
826 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
827 preempt_enable();
828 return -ENOMEM;
829 }
830 }
831 }
832 preempt_enable();
833
834 return 0;
835}
836
837
838
839
840
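/* Drain the specified number of buffers from the DPNI's private buffer
 * pool. @count must not exceed DPAA2_ETH_BUFS_PER_CMD.
 */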
841static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
842{
843 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
844 int ret;
845
846 do {
847 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
848 buf_array, count);
849 if (ret < 0) {
850 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
851 return;
852 }
853 free_bufs(priv, buf_array, ret);
854 } while (ret);
855}
856
857static void drain_pool(struct dpaa2_eth_priv *priv)
858{
859 int i;
860
861 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
862 drain_bufs(priv, 1);
863
864 for (i = 0; i < priv->num_channels; i++)
865 priv->channel[i]->buf_count = 0;
866}
867
868
869
870
871static int refill_pool(struct dpaa2_eth_priv *priv,
872 struct dpaa2_eth_channel *ch,
873 u16 bpid)
874{
875 int new_count;
876
877 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
878 return 0;
879
880 do {
881 new_count = add_bufs(priv, ch, bpid);
882 if (unlikely(!new_count)) {
883
884 break;
885 }
886 ch->buf_count += new_count;
887 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
888
889 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
890 return -ENOMEM;
891
892 return 0;
893}
894
895static int pull_channel(struct dpaa2_eth_channel *ch)
896{
897 int err;
898 int dequeues = -1;
899
900
901 do {
902 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
903 ch->store);
904 dequeues++;
905 cpu_relax();
906 } while (err == -EBUSY);
907
908 ch->stats.dequeue_portal_busy += dequeues;
909 if (unlikely(err))
910 ch->stats.pull_err++;
911
912 return err;
913}
914
915
916
917
918
919
920
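/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI
 * context. Rx, Tx confirmation and (if configured) Rx error frames all
 * count toward the NAPI budget.
 */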
921static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
922{
923 struct dpaa2_eth_channel *ch;
924 int cleaned = 0, store_cleaned;
925 struct dpaa2_eth_priv *priv;
926 int err;
927
928 ch = container_of(napi, struct dpaa2_eth_channel, napi);
929 priv = ch->priv;
930
931 while (cleaned < budget) {
932 err = pull_channel(ch);
933 if (unlikely(err))
934 break;
935
936
937 refill_pool(priv, ch, priv->bpid);
938
939 store_cleaned = consume_frames(ch);
940 cleaned += store_cleaned;
941
942
943
944
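/* Stop if no frames were dequeued or if we no longer have
 * budget left for a full new store
 */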
945 if (store_cleaned == 0 ||
946 cleaned > budget - DPAA2_ETH_STORE_SIZE)
947 break;
948 }
949
950 if (cleaned < budget && napi_complete_done(napi, cleaned)) {
951
952 do {
953 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
954 cpu_relax();
955 } while (err == -EBUSY);
956 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
957 ch->nctx.desired_cpu);
958 }
959
960 ch->stats.frames += cleaned;
961
962 return cleaned;
963}
964
965static void enable_ch_napi(struct dpaa2_eth_priv *priv)
966{
967 struct dpaa2_eth_channel *ch;
968 int i;
969
970 for (i = 0; i < priv->num_channels; i++) {
971 ch = priv->channel[i];
972 napi_enable(&ch->napi);
973 }
974}
975
976static void disable_ch_napi(struct dpaa2_eth_priv *priv)
977{
978 struct dpaa2_eth_channel *ch;
979 int i;
980
981 for (i = 0; i < priv->num_channels; i++) {
982 ch = priv->channel[i];
983 napi_disable(&ch->napi);
984 }
985}
986
987static int link_state_update(struct dpaa2_eth_priv *priv)
988{
989 struct dpni_link_state state;
990 int err;
991
992 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
993 if (unlikely(err)) {
994 netdev_err(priv->net_dev,
995 "dpni_get_link_state() failed\n");
996 return err;
997 }
998
999
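/* Only take action when the up/down state actually changed */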
1000 if (priv->link_state.up == state.up)
1001 return 0;
1002
1003 priv->link_state = state;
1004 if (state.up) {
1005 netif_carrier_on(priv->net_dev);
1006 netif_tx_start_all_queues(priv->net_dev);
1007 } else {
1008 netif_tx_stop_all_queues(priv->net_dev);
1009 netif_carrier_off(priv->net_dev);
1010 }
1011
1012 netdev_info(priv->net_dev, "Link Event: state %s\n",
1013 state.up ? "up" : "down");
1014
1015 return 0;
1016}
1017
1018static int dpaa2_eth_open(struct net_device *net_dev)
1019{
1020 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1021 int err;
1022
1023 err = seed_pool(priv, priv->bpid);
1024 if (err) {
1025
1026
1027
1028
1029 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1030 priv->dpbp_dev->obj_desc.id, priv->bpid);
1031 }
1032
1033
1034
1035
1036
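/* We'll only start the txqs when the link is actually ready; make sure
 * we don't race against the link up notification, which may come
 * immediately after dpni_enable().
 */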
1037 netif_tx_stop_all_queues(net_dev);
1038 enable_ch_napi(priv);
1039
1040
1041
1042
1043 netif_carrier_off(net_dev);
1044
1045 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1046 if (err < 0) {
1047 netdev_err(net_dev, "dpni_enable() failed\n");
1048 goto enable_err;
1049 }
1050
1051
1052
1053
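/* The link up notification may have been processed before we could
 * observe it, so query the current link state explicitly.
 */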
1054 err = link_state_update(priv);
1055 if (err < 0) {
1056 netdev_err(net_dev, "Can't update link state\n");
1057 goto link_state_err;
1058 }
1059
1060 return 0;
1061
1062link_state_err:
1063enable_err:
1064 disable_ch_napi(priv);
1065 drain_pool(priv);
1066 return err;
1067}
1068
1069
1070
1071
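/* Dequeue and consume any frames still pending on this channel;
 * the per-channel store is free to use here since NAPI is not running.
 */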
1072static u32 drain_channel(struct dpaa2_eth_priv *priv,
1073 struct dpaa2_eth_channel *ch)
1074{
1075 u32 drained = 0, total = 0;
1076
1077 do {
1078 pull_channel(ch);
1079 drained = consume_frames(ch);
1080 total += drained;
1081 } while (drained);
1082
1083 return total;
1084}
1085
1086static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1087{
1088 struct dpaa2_eth_channel *ch;
1089 int i;
1090 u32 drained = 0;
1091
1092 for (i = 0; i < priv->num_channels; i++) {
1093 ch = priv->channel[i];
1094 drained += drain_channel(priv, ch);
1095 }
1096
1097 return drained;
1098}
1099
1100static int dpaa2_eth_stop(struct net_device *net_dev)
1101{
1102 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1103 int dpni_enabled;
1104 int retries = 10;
1105 u32 drained;
1106
1107 netif_tx_stop_all_queues(net_dev);
1108 netif_carrier_off(net_dev);
1109
1110
1111
1112
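/* Loop while dpni_disable() attempts to drain the egress FQs
 * and confirm them back to us.
 */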
1113 do {
1114 dpni_disable(priv->mc_io, 0, priv->mc_token);
1115 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1116 if (dpni_enabled)
1117
1118 msleep(100);
1119 } while (dpni_enabled && --retries);
1120 if (!retries) {
1121 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1122
1123
1124
1125 }
1126
1127
1128
1129
1130
1131
1132
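/* Wait for NAPI to complete on every core and disable it.
 * This also prevents NAPI from being rescheduled if a new CDAN
 * arrives, effectively discarding it.
 */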
1133 disable_ch_napi(priv);
1134
1135
1136 drained = drain_ingress_frames(priv);
1137 if (drained)
1138 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1139
1140
1141 drain_pool(priv);
1142
1143 return 0;
1144}
1145
1146static int dpaa2_eth_init(struct net_device *net_dev)
1147{
1148 u64 supported = 0;
1149 u64 not_supported = 0;
1150 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1151 u32 options = priv->dpni_attrs.options;
1152
1153
1154 supported |= IFF_LIVE_ADDR_CHANGE;
1155
1156 if (options & DPNI_OPT_NO_MAC_FILTER)
1157 not_supported |= IFF_UNICAST_FLT;
1158 else
1159 supported |= IFF_UNICAST_FLT;
1160
1161 net_dev->priv_flags |= supported;
1162 net_dev->priv_flags &= ~not_supported;
1163
1164
1165 net_dev->features = NETIF_F_RXCSUM |
1166 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1167 NETIF_F_SG | NETIF_F_HIGHDMA |
1168 NETIF_F_LLTX;
1169 net_dev->hw_features = net_dev->features;
1170
1171 return 0;
1172}
1173
1174static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1175{
1176 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1177 struct device *dev = net_dev->dev.parent;
1178 int err;
1179
1180 err = eth_mac_addr(net_dev, addr);
1181 if (err < 0) {
1182 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1183 return err;
1184 }
1185
1186 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1187 net_dev->dev_addr);
1188 if (err) {
1189 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1190 return err;
1191 }
1192
1193 return 0;
1194}
1195
1196
1197
1198
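/* Fill in counters maintained by the GPP driver. These may be different
 * from the hardware counters obtained by ethtool.
 */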
1199static void dpaa2_eth_get_stats(struct net_device *net_dev,
1200 struct rtnl_link_stats64 *stats)
1201{
1202 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1203 struct rtnl_link_stats64 *percpu_stats;
1204 u64 *cpustats;
1205 u64 *netstats = (u64 *)stats;
1206 int i, j;
1207 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1208
1209 for_each_possible_cpu(i) {
1210 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1211 cpustats = (u64 *)percpu_stats;
1212 for (j = 0; j < num; j++)
1213 netstats[j] += cpustats[j];
1214 }
1215}
1216
1217
1218
1219
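/* Copy the net device's unicast addresses into the DPNI MAC filtering
 * table; helper for dpaa2_eth_set_rx_mode().
 */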
1220static void add_uc_hw_addr(const struct net_device *net_dev,
1221 struct dpaa2_eth_priv *priv)
1222{
1223 struct netdev_hw_addr *ha;
1224 int err;
1225
1226 netdev_for_each_uc_addr(ha, net_dev) {
1227 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1228 ha->addr);
1229 if (err)
1230 netdev_warn(priv->net_dev,
1231 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1232 ha->addr, err);
1233 }
1234}
1235
1236
1237
1238
1239static void add_mc_hw_addr(const struct net_device *net_dev,
1240 struct dpaa2_eth_priv *priv)
1241{
1242 struct netdev_hw_addr *ha;
1243 int err;
1244
1245 netdev_for_each_mc_addr(ha, net_dev) {
1246 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1247 ha->addr);
1248 if (err)
1249 netdev_warn(priv->net_dev,
1250 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1251 ha->addr, err);
1252 }
1253}
1254
1255static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1256{
1257 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1258 int uc_count = netdev_uc_count(net_dev);
1259 int mc_count = netdev_mc_count(net_dev);
1260 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1261 u32 options = priv->dpni_attrs.options;
1262 u16 mc_token = priv->mc_token;
1263 struct fsl_mc_io *mc_io = priv->mc_io;
1264 int err;
1265
1266
1267 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1268 netdev_info(net_dev,
1269 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1270 max_mac);
1271
1272
1273 if (uc_count > max_mac) {
1274 netdev_info(net_dev,
1275 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1276 uc_count, max_mac);
1277 goto force_promisc;
1278 }
1279 if (mc_count + uc_count > max_mac) {
1280 netdev_info(net_dev,
1281 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1282 uc_count + mc_count, max_mac);
1283 goto force_mc_promisc;
1284 }
1285
1286
1287 if (net_dev->flags & IFF_PROMISC)
1288 goto force_promisc;
1289 if (net_dev->flags & IFF_ALLMULTI) {
1290
1291
1292
1293
1294
1295
1296
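/* First, rebuild the unicast filtering table. This should be done
 * in promisc mode, in order to avoid frame loss while we progressively
 * add entries to the table.
 */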
1297 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1298 if (err)
1299 netdev_warn(net_dev, "Can't set uc promisc\n");
1300
1301
1302 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1303 if (err)
1304 netdev_warn(net_dev, "Can't clear uc filters\n");
1305 add_uc_hw_addr(net_dev, priv);
1306
1307
1308 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1309 if (err)
1310 netdev_warn(net_dev, "Can't clear uc promisc\n");
1311 goto force_mc_promisc;
1312 }
1313
1314
1315
1316
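/* Neither unicast nor multicast promisc should stay on in the end;
 * rebuild the MAC filtering tables while both are forced on, so no
 * legitimate frames are dropped in the meantime.
 */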
1317 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1318 if (err)
1319 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1320 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1321 if (err)
1322 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1323
1324
1325 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1326 if (err)
1327 netdev_warn(net_dev, "Can't clear mac filters\n");
1328 add_mc_hw_addr(net_dev, priv);
1329 add_uc_hw_addr(net_dev, priv);
1330
1331
1332
1333
1334 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1335 if (err)
1336 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1337 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1338 if (err)
1339 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1340
1341 return;
1342
1343force_promisc:
1344 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1345 if (err)
1346 netdev_warn(net_dev, "Can't set ucast promisc\n");
1347force_mc_promisc:
1348 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1349 if (err)
1350 netdev_warn(net_dev, "Can't set mcast promisc\n");
1351}
1352
1353static int dpaa2_eth_set_features(struct net_device *net_dev,
1354 netdev_features_t features)
1355{
1356 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1357 netdev_features_t changed = features ^ net_dev->features;
1358 bool enable;
1359 int err;
1360
1361 if (changed & NETIF_F_RXCSUM) {
1362 enable = !!(features & NETIF_F_RXCSUM);
1363 err = set_rx_csum(priv, enable);
1364 if (err)
1365 return err;
1366 }
1367
1368 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1369 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1370 err = set_tx_csum(priv, enable);
1371 if (err)
1372 return err;
1373 }
1374
1375 return 0;
1376}
1377
1378static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1379{
1380 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1381 struct hwtstamp_config config;
1382
1383 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1384 return -EFAULT;
1385
1386 switch (config.tx_type) {
1387 case HWTSTAMP_TX_OFF:
1388 priv->tx_tstamp = false;
1389 break;
1390 case HWTSTAMP_TX_ON:
1391 priv->tx_tstamp = true;
1392 break;
1393 default:
1394 return -ERANGE;
1395 }
1396
1397 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1398 priv->rx_tstamp = false;
1399 } else {
1400 priv->rx_tstamp = true;
1401
1402 config.rx_filter = HWTSTAMP_FILTER_ALL;
1403 }
1404
1405 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1406 -EFAULT : 0;
1407}
1408
1409static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1410{
1411 if (cmd == SIOCSHWTSTAMP)
1412 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1413
1414 return -EINVAL;
1415}
1416
1417static const struct net_device_ops dpaa2_eth_ops = {
1418 .ndo_open = dpaa2_eth_open,
1419 .ndo_start_xmit = dpaa2_eth_tx,
1420 .ndo_stop = dpaa2_eth_stop,
1421 .ndo_init = dpaa2_eth_init,
1422 .ndo_set_mac_address = dpaa2_eth_set_addr,
1423 .ndo_get_stats64 = dpaa2_eth_get_stats,
1424 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1425 .ndo_set_features = dpaa2_eth_set_features,
1426 .ndo_do_ioctl = dpaa2_eth_ioctl,
1427};
1428
1429static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1430{
1431 struct dpaa2_eth_channel *ch;
1432
1433 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1434
1435
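/* Update NAPI statistics */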
1436 ch->stats.cdan++;
1437
1438 napi_schedule_irqoff(&ch->napi);
1439}
1440
1441
1442static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1443{
1444 struct fsl_mc_device *dpcon;
1445 struct device *dev = priv->net_dev->dev.parent;
1446 struct dpcon_attr attrs;
1447 int err;
1448
1449 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1450 FSL_MC_POOL_DPCON, &dpcon);
1451 if (err) {
1452 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1453 return NULL;
1454 }
1455
1456 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1457 if (err) {
1458 dev_err(dev, "dpcon_open() failed\n");
1459 goto free;
1460 }
1461
1462 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1463 if (err) {
1464 dev_err(dev, "dpcon_reset() failed\n");
1465 goto close;
1466 }
1467
1468 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1469 if (err) {
1470 dev_err(dev, "dpcon_get_attributes() failed\n");
1471 goto close;
1472 }
1473
1474 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1475 if (err) {
1476 dev_err(dev, "dpcon_enable() failed\n");
1477 goto close;
1478 }
1479
1480 return dpcon;
1481
1482close:
1483 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1484free:
1485 fsl_mc_object_free(dpcon);
1486
1487 return NULL;
1488}
1489
1490static void free_dpcon(struct dpaa2_eth_priv *priv,
1491 struct fsl_mc_device *dpcon)
1492{
1493 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1494 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1495 fsl_mc_object_free(dpcon);
1496}
1497
1498static struct dpaa2_eth_channel *
1499alloc_channel(struct dpaa2_eth_priv *priv)
1500{
1501 struct dpaa2_eth_channel *channel;
1502 struct dpcon_attr attr;
1503 struct device *dev = priv->net_dev->dev.parent;
1504 int err;
1505
1506 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1507 if (!channel)
1508 return NULL;
1509
1510 channel->dpcon = setup_dpcon(priv);
1511 if (!channel->dpcon)
1512 goto err_setup;
1513
1514 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1515 &attr);
1516 if (err) {
1517 dev_err(dev, "dpcon_get_attributes() failed\n");
1518 goto err_get_attr;
1519 }
1520
1521 channel->dpcon_id = attr.id;
1522 channel->ch_id = attr.qbman_ch_id;
1523 channel->priv = priv;
1524
1525 return channel;
1526
1527err_get_attr:
1528 free_dpcon(priv, channel->dpcon);
1529err_setup:
1530 kfree(channel);
1531 return NULL;
1532}
1533
1534static void free_channel(struct dpaa2_eth_priv *priv,
1535 struct dpaa2_eth_channel *channel)
1536{
1537 free_dpcon(priv, channel->dpcon);
1538 kfree(channel);
1539}
1540
1541
1542
1543
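/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */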
1544static int setup_dpio(struct dpaa2_eth_priv *priv)
1545{
1546 struct dpaa2_io_notification_ctx *nctx;
1547 struct dpaa2_eth_channel *channel;
1548 struct dpcon_notification_cfg dpcon_notif_cfg;
1549 struct device *dev = priv->net_dev->dev.parent;
1550 int i, err;
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 cpumask_clear(&priv->dpio_cpumask);
1562 for_each_online_cpu(i) {
1563
1564 channel = alloc_channel(priv);
1565 if (!channel) {
1566 dev_info(dev,
1567 "No affine channel for cpu %d and above\n", i);
1568 err = -ENODEV;
1569 goto err_alloc_ch;
1570 }
1571
1572 priv->channel[priv->num_channels] = channel;
1573
1574 nctx = &channel->nctx;
1575 nctx->is_cdan = 1;
1576 nctx->cb = cdan_cb;
1577 nctx->id = channel->ch_id;
1578 nctx->desired_cpu = i;
1579
1580
1581 channel->dpio = dpaa2_io_service_select(i);
1582 err = dpaa2_io_service_register(channel->dpio, nctx);
1583 if (err) {
1584 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
1585
1586
1587
1588
1589
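/* If there's no affine DPIO for this core, the DPIO devices may simply
 * not have been probed yet; defer probing so we can retry later.
 */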
1590 err = -EPROBE_DEFER;
1591 goto err_service_reg;
1592 }
1593
1594
1595 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1596 dpcon_notif_cfg.priority = 0;
1597 dpcon_notif_cfg.user_ctx = nctx->qman64;
1598 err = dpcon_set_notification(priv->mc_io, 0,
1599 channel->dpcon->mc_handle,
1600 &dpcon_notif_cfg);
1601 if (err) {
1602 dev_err(dev, "dpcon_set_notification failed()\n");
1603 goto err_set_cdan;
1604 }
1605
1606
1607
1608
1609 cpumask_set_cpu(i, &priv->dpio_cpumask);
1610 priv->num_channels++;
1611
1612
1613
1614
1615 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1616 break;
1617 }
1618
1619 return 0;
1620
1621err_set_cdan:
1622 dpaa2_io_service_deregister(channel->dpio, nctx);
1623err_service_reg:
1624 free_channel(priv, channel);
1625err_alloc_ch:
1626 if (cpumask_empty(&priv->dpio_cpumask)) {
1627 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1628 return err;
1629 }
1630
1631 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1632 cpumask_pr_args(&priv->dpio_cpumask));
1633
1634 return 0;
1635}
1636
1637static void free_dpio(struct dpaa2_eth_priv *priv)
1638{
1639 int i;
1640 struct dpaa2_eth_channel *ch;
1641
1642
1643 for (i = 0; i < priv->num_channels; i++) {
1644 ch = priv->channel[i];
1645 dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
1646 free_channel(priv, ch);
1647 }
1648}
1649
1650static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1651 int cpu)
1652{
1653 struct device *dev = priv->net_dev->dev.parent;
1654 int i;
1655
1656 for (i = 0; i < priv->num_channels; i++)
1657 if (priv->channel[i]->nctx.desired_cpu == cpu)
1658 return priv->channel[i];
1659
1660
1661
1662
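/* We should never get here; issue a warning and fall back to the
 * first channel, which is guaranteed to exist.
 */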
1663 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1664
1665 return priv->channel[0];
1666}
1667
1668static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1669{
1670 struct device *dev = priv->net_dev->dev.parent;
1671 struct cpumask xps_mask;
1672 struct dpaa2_eth_fq *fq;
1673 int rx_cpu, txc_cpu;
1674 int i, err;
1675
1676
1677
1678
1679
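/* For each FQ, pick one channel/CPU to deliver frames to, starting from
 * the first CPU with an affine DPIO and going round-robin.
 */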
1680 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1681
1682 for (i = 0; i < priv->num_fqs; i++) {
1683 fq = &priv->fq[i];
1684 switch (fq->type) {
1685 case DPAA2_RX_FQ:
1686 fq->target_cpu = rx_cpu;
1687 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1688 if (rx_cpu >= nr_cpu_ids)
1689 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1690 break;
1691 case DPAA2_TX_CONF_FQ:
1692 fq->target_cpu = txc_cpu;
1693
1694
1695
1696
1697 cpumask_clear(&xps_mask);
1698 cpumask_set_cpu(txc_cpu, &xps_mask);
1699 err = netif_set_xps_queue(priv->net_dev, &xps_mask,
1700 fq->flowid);
1701 if (err)
1702 dev_err(dev, "Error setting XPS queue\n");
1703
1704 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1705 if (txc_cpu >= nr_cpu_ids)
1706 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1707 break;
1708 default:
1709 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1710 }
1711 fq->channel = get_affine_channel(priv, fq->target_cpu);
1712 }
1713}
1714
1715static void setup_fqs(struct dpaa2_eth_priv *priv)
1716{
1717 int i;
1718
1719
1720
1721
1722
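/* The number of Tx and Rx queues is the same;
 * Tx confirmation queues come first in the fq array.
 */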
1723 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1724 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1725 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1726 priv->fq[priv->num_fqs++].flowid = (u16)i;
1727 }
1728
1729 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1730 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1731 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1732 priv->fq[priv->num_fqs++].flowid = (u16)i;
1733 }
1734
1735
1736 set_fq_affinity(priv);
1737}
1738
1739
1740static int setup_dpbp(struct dpaa2_eth_priv *priv)
1741{
1742 int err;
1743 struct fsl_mc_device *dpbp_dev;
1744 struct device *dev = priv->net_dev->dev.parent;
1745 struct dpbp_attr dpbp_attrs;
1746
1747 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1748 &dpbp_dev);
1749 if (err) {
1750 dev_err(dev, "DPBP device allocation failed\n");
1751 return err;
1752 }
1753
1754 priv->dpbp_dev = dpbp_dev;
1755
1756 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1757 &dpbp_dev->mc_handle);
1758 if (err) {
1759 dev_err(dev, "dpbp_open() failed\n");
1760 goto err_open;
1761 }
1762
1763 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1764 if (err) {
1765 dev_err(dev, "dpbp_reset() failed\n");
1766 goto err_reset;
1767 }
1768
1769 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1770 if (err) {
1771 dev_err(dev, "dpbp_enable() failed\n");
1772 goto err_enable;
1773 }
1774
1775 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1776 &dpbp_attrs);
1777 if (err) {
1778 dev_err(dev, "dpbp_get_attributes() failed\n");
1779 goto err_get_attr;
1780 }
1781 priv->bpid = dpbp_attrs.bpid;
1782
1783 return 0;
1784
1785err_get_attr:
1786 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1787err_enable:
1788err_reset:
1789 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1790err_open:
1791 fsl_mc_object_free(dpbp_dev);
1792
1793 return err;
1794}
1795
1796static void free_dpbp(struct dpaa2_eth_priv *priv)
1797{
1798 drain_pool(priv);
1799 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1800 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1801 fsl_mc_object_free(priv->dpbp_dev);
1802}
1803
1804static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1805{
1806 struct device *dev = priv->net_dev->dev.parent;
1807 struct dpni_buffer_layout buf_layout = {0};
1808 int err;
1809
1810
1811
1812
1813
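/* Rev1 WRIOP (1.0.0) may report its version as either 0.0.0 or 1.0.0,
 * depending on the MC firmware, so check for both when selecting the
 * Rx buffer alignment.
 */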
1814 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
1815 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
1816 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
1817 else
1818 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
1819
1820
1821 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1822 buf_layout.pass_timestamp = true;
1823 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1824 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1825 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1826 DPNI_QUEUE_TX, &buf_layout);
1827 if (err) {
1828 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1829 return err;
1830 }
1831
1832
1833 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1834 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1835 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1836 if (err) {
1837 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1838 return err;
1839 }
1840
1841
1842
1843
1844 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1845 &priv->tx_data_offset);
1846 if (err) {
1847 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1848 return err;
1849 }
1850
1851 if ((priv->tx_data_offset % 64) != 0)
1852 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1853 priv->tx_data_offset);
1854
1855
1856 buf_layout.pass_frame_status = true;
1857 buf_layout.pass_parser_result = true;
1858 buf_layout.data_align = priv->rx_buf_align;
1859 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
1860 buf_layout.private_data_size = 0;
1861 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1862 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1863 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
1864 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
1865 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1866 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1867 DPNI_QUEUE_RX, &buf_layout);
1868 if (err) {
1869 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1870 return err;
1871 }
1872
1873 return 0;
1874}
1875
1876
1877static int setup_dpni(struct fsl_mc_device *ls_dev)
1878{
1879 struct device *dev = &ls_dev->dev;
1880 struct dpaa2_eth_priv *priv;
1881 struct net_device *net_dev;
1882 int err;
1883
1884 net_dev = dev_get_drvdata(dev);
1885 priv = netdev_priv(net_dev);
1886
1887
1888 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
1889 if (err) {
1890 dev_err(dev, "dpni_open() failed\n");
1891 return err;
1892 }
1893
1894
1895 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
1896 &priv->dpni_ver_minor);
1897 if (err) {
1898 dev_err(dev, "dpni_get_api_version() failed\n");
1899 goto close;
1900 }
1901 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
1902 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
1903 priv->dpni_ver_major, priv->dpni_ver_minor,
1904 DPNI_VER_MAJOR, DPNI_VER_MINOR);
1905 err = -ENOTSUPP;
1906 goto close;
1907 }
1908
1909 ls_dev->mc_io = priv->mc_io;
1910 ls_dev->mc_handle = priv->mc_token;
1911
1912 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1913 if (err) {
1914 dev_err(dev, "dpni_reset() failed\n");
1915 goto close;
1916 }
1917
1918 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1919 &priv->dpni_attrs);
1920 if (err) {
1921 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1922 goto close;
1923 }
1924
1925 err = set_buffer_layout(priv);
1926 if (err)
1927 goto close;
1928
1929 return 0;
1930
1931close:
1932 dpni_close(priv->mc_io, 0, priv->mc_token);
1933
1934 return err;
1935}
1936
1937static void free_dpni(struct dpaa2_eth_priv *priv)
1938{
1939 int err;
1940
1941 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1942 if (err)
1943 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1944 err);
1945
1946 dpni_close(priv->mc_io, 0, priv->mc_token);
1947}
1948
1949static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1950 struct dpaa2_eth_fq *fq)
1951{
1952 struct device *dev = priv->net_dev->dev.parent;
1953 struct dpni_queue queue;
1954 struct dpni_queue_id qid;
1955 struct dpni_taildrop td;
1956 int err;
1957
1958 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1959 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1960 if (err) {
1961 dev_err(dev, "dpni_get_queue(RX) failed\n");
1962 return err;
1963 }
1964
1965 fq->fqid = qid.fqid;
1966
1967 queue.destination.id = fq->channel->dpcon_id;
1968 queue.destination.type = DPNI_DEST_DPCON;
1969 queue.destination.priority = 1;
1970 queue.user_context = (u64)(uintptr_t)fq;
1971 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1972 DPNI_QUEUE_RX, 0, fq->flowid,
1973 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1974 &queue);
1975 if (err) {
1976 dev_err(dev, "dpni_set_queue(RX) failed\n");
1977 return err;
1978 }
1979
1980 td.enable = 1;
1981 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1982 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1983 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1984 if (err) {
1985 dev_err(dev, "dpni_set_threshold() failed\n");
1986 return err;
1987 }
1988
1989 return 0;
1990}
1991
1992static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1993 struct dpaa2_eth_fq *fq)
1994{
1995 struct device *dev = priv->net_dev->dev.parent;
1996 struct dpni_queue queue;
1997 struct dpni_queue_id qid;
1998 int err;
1999
2000 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2001 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
2002 if (err) {
2003 dev_err(dev, "dpni_get_queue(TX) failed\n");
2004 return err;
2005 }
2006
2007 fq->tx_qdbin = qid.qdbin;
2008
2009 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2010 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2011 &queue, &qid);
2012 if (err) {
2013 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2014 return err;
2015 }
2016
2017 fq->fqid = qid.fqid;
2018
2019 queue.destination.id = fq->channel->dpcon_id;
2020 queue.destination.type = DPNI_DEST_DPCON;
2021 queue.destination.priority = 0;
2022 queue.user_context = (u64)(uintptr_t)fq;
2023 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2024 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2025 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2026 &queue);
2027 if (err) {
2028 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2029 return err;
2030 }
2031
2032 return 0;
2033}
2034
2035
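/* Supported header fields for Rx hash distribution key */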
2036static const struct dpaa2_eth_hash_fields hash_fields[] = {
2037 {
2038
2039 .rxnfc_field = RXH_IP_SRC,
2040 .cls_prot = NET_PROT_IP,
2041 .cls_field = NH_FLD_IP_SRC,
2042 .size = 4,
2043 }, {
2044 .rxnfc_field = RXH_IP_DST,
2045 .cls_prot = NET_PROT_IP,
2046 .cls_field = NH_FLD_IP_DST,
2047 .size = 4,
2048 }, {
2049 .rxnfc_field = RXH_L3_PROTO,
2050 .cls_prot = NET_PROT_IP,
2051 .cls_field = NH_FLD_IP_PROTO,
2052 .size = 1,
2053 }, {
2054
2055
2056
2057 .rxnfc_field = RXH_L4_B_0_1,
2058 .cls_prot = NET_PROT_UDP,
2059 .cls_field = NH_FLD_UDP_PORT_SRC,
2060 .size = 2,
2061 }, {
2062 .rxnfc_field = RXH_L4_B_2_3,
2063 .cls_prot = NET_PROT_UDP,
2064 .cls_field = NH_FLD_UDP_PORT_DST,
2065 .size = 2,
2066 },
2067};
2068
2069
2070
2071
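/* Set Rx hash options
 * @flags: combination of RXH_ bits
 */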
2072static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2073{
2074 struct device *dev = net_dev->dev.parent;
2075 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2076 struct dpkg_profile_cfg cls_cfg;
2077 struct dpni_rx_tc_dist_cfg dist_cfg;
2078 u8 *dma_mem;
2079 int i;
2080 int err = 0;
2081
2082 if (!dpaa2_eth_hash_enabled(priv)) {
2083 dev_dbg(dev, "Hashing support is not enabled\n");
2084 return 0;
2085 }
2086
2087 memset(&cls_cfg, 0, sizeof(cls_cfg));
2088
2089 for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
2090 struct dpkg_extract *key =
2091 &cls_cfg.extracts[cls_cfg.num_extracts];
2092
2093 if (!(flags & hash_fields[i].rxnfc_field))
2094 continue;
2095
2096 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2097 dev_err(dev, "error adding key extraction rule, too many rules?\n");
2098 return -E2BIG;
2099 }
2100
2101 key->type = DPKG_EXTRACT_FROM_HDR;
2102 key->extract.from_hdr.prot = hash_fields[i].cls_prot;
2103 key->extract.from_hdr.type = DPKG_FULL_FIELD;
2104 key->extract.from_hdr.field = hash_fields[i].cls_field;
2105 cls_cfg.num_extracts++;
2106
2107 priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
2108 }
2109
2110 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2111 if (!dma_mem)
2112 return -ENOMEM;
2113
2114 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2115 if (err) {
2116 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2117 goto err_prep_key;
2118 }
2119
2120 memset(&dist_cfg, 0, sizeof(dist_cfg));
2121
2122
2123 dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
2124 DPAA2_CLASSIFIER_DMA_SIZE,
2125 DMA_TO_DEVICE);
2126 if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
2127 dev_err(dev, "DMA mapping failed\n");
2128 err = -ENOMEM;
2129 goto err_dma_map;
2130 }
2131
2132 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2133 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2134
2135 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2136 dma_unmap_single(dev, dist_cfg.key_cfg_iova,
2137 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
2138 if (err)
2139 dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
2140
2141err_dma_map:
2142err_prep_key:
2143 kfree(dma_mem);
2144 return err;
2145}
2146
2147
2148
2149
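/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */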
2150static int bind_dpni(struct dpaa2_eth_priv *priv)
2151{
2152 struct net_device *net_dev = priv->net_dev;
2153 struct device *dev = net_dev->dev.parent;
2154 struct dpni_pools_cfg pools_params;
2155 struct dpni_error_cfg err_cfg;
2156 int err = 0;
2157 int i;
2158
2159 pools_params.num_dpbp = 1;
2160 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2161 pools_params.pools[0].backup_pool = 0;
2162 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2163 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2164 if (err) {
2165 dev_err(dev, "dpni_set_pools() failed\n");
2166 return err;
2167 }
2168
2169
2170
2171
2172 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
2173 if (err)
2174 dev_err(dev, "Failed to configure hashing\n");
2175
2176
2177 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2178 err_cfg.set_frame_annotation = 1;
2179 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2180 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2181 &err_cfg);
2182 if (err) {
2183 dev_err(dev, "dpni_set_errors_behavior failed\n");
2184 return err;
2185 }
2186
2187
2188 for (i = 0; i < priv->num_fqs; i++) {
2189 switch (priv->fq[i].type) {
2190 case DPAA2_RX_FQ:
2191 err = setup_rx_flow(priv, &priv->fq[i]);
2192 break;
2193 case DPAA2_TX_CONF_FQ:
2194 err = setup_tx_flow(priv, &priv->fq[i]);
2195 break;
2196 default:
2197 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2198 return -EINVAL;
2199 }
2200 if (err)
2201 return err;
2202 }
2203
2204 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2205 DPNI_QUEUE_TX, &priv->tx_qdid);
2206 if (err) {
2207 dev_err(dev, "dpni_get_qdid() failed\n");
2208 return err;
2209 }
2210
2211 return 0;
2212}
2213
2214
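/* Allocate per-channel stores for pull-dequeued frame descriptors */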
2215static int alloc_rings(struct dpaa2_eth_priv *priv)
2216{
2217 struct net_device *net_dev = priv->net_dev;
2218 struct device *dev = net_dev->dev.parent;
2219 int i;
2220
2221 for (i = 0; i < priv->num_channels; i++) {
2222 priv->channel[i]->store =
2223 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2224 if (!priv->channel[i]->store) {
2225 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2226 goto err_ring;
2227 }
2228 }
2229
2230 return 0;
2231
2232err_ring:
2233 for (i = 0; i < priv->num_channels; i++) {
2234 if (!priv->channel[i]->store)
2235 break;
2236 dpaa2_io_store_destroy(priv->channel[i]->store);
2237 }
2238
2239 return -ENOMEM;
2240}
2241
2242static void free_rings(struct dpaa2_eth_priv *priv)
2243{
2244 int i;
2245
2246 for (i = 0; i < priv->num_channels; i++)
2247 dpaa2_io_store_destroy(priv->channel[i]->store);
2248}
2249
2250static int set_mac_addr(struct dpaa2_eth_priv *priv)
2251{
2252 struct net_device *net_dev = priv->net_dev;
2253 struct device *dev = net_dev->dev.parent;
2254 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2255 int err;
2256
2257
2258 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2259 if (err) {
2260 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2261 return err;
2262 }
2263
2264
2265 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2266 dpni_mac_addr);
2267 if (err) {
2268 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
2269 return err;
2270 }
2271
2272
2273 if (!is_zero_ether_addr(mac_addr)) {
2274
2275 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2276 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2277 priv->mc_token,
2278 mac_addr);
2279 if (err) {
2280 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2281 return err;
2282 }
2283 }
2284 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2285 } else if (is_zero_ether_addr(dpni_mac_addr)) {
2286
2287
2288
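/* No MAC address configured on either the DPMAC or the DPNI;
 * generate a random one.
 */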
2289 eth_hw_addr_random(net_dev);
2290 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2291
2292 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2293 net_dev->dev_addr);
2294 if (err) {
2295 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2296 return err;
2297 }
2298
2299
2300
2301
2302
2303
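/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
 * practical purposes, this will be our "permanent" MAC address,
 * at least until the next reboot.
 */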
2304 net_dev->addr_assign_type = NET_ADDR_PERM;
2305 } else {
2306
2307
2308
2309 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2310 }
2311
2312 return 0;
2313}
2314
2315static int netdev_init(struct net_device *net_dev)
2316{
2317 struct device *dev = net_dev->dev.parent;
2318 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2319 u8 bcast_addr[ETH_ALEN];
2320 u8 num_queues;
2321 int err;
2322
2323 net_dev->netdev_ops = &dpaa2_eth_ops;
2324
2325 err = set_mac_addr(priv);
2326 if (err)
2327 return err;
2328
2329
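/* Explicitly add the broadcast address to the MAC filtering table */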
2330 eth_broadcast_addr(bcast_addr);
2331 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2332 if (err) {
2333 dev_err(dev, "dpni_add_mac_addr() failed\n");
2334 return err;
2335 }
2336
2337
2338 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2339 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2340 DPAA2_ETH_MFL);
2341 if (err) {
2342 dev_err(dev, "dpni_set_max_frame_length() failed\n");
2343 return err;
2344 }
2345
2346
2347 num_queues = dpaa2_eth_queue_count(priv);
2348 err = netif_set_real_num_tx_queues(net_dev, num_queues);
2349 if (err) {
2350 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
2351 return err;
2352 }
2353 err = netif_set_real_num_rx_queues(net_dev, num_queues);
2354 if (err) {
2355 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
2356 return err;
2357 }
2358
2359
2360 err = register_netdev(net_dev);
2361 if (err < 0) {
2362 dev_err(dev, "register_netdev() failed\n");
2363 return err;
2364 }
2365
2366 return 0;
2367}
2368
2369static int poll_link_state(void *arg)
2370{
2371 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2372 int err;
2373
2374 while (!kthread_should_stop()) {
2375 err = link_state_update(priv);
2376 if (unlikely(err))
2377 return err;
2378
2379 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2380 }
2381
2382 return 0;
2383}
2384
2385static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2386{
2387 u32 status = ~0;
2388 struct device *dev = (struct device *)arg;
2389 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2390 struct net_device *net_dev = dev_get_drvdata(dev);
2391 int err;
2392
2393 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2394 DPNI_IRQ_INDEX, &status);
2395 if (unlikely(err)) {
2396 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2397 return IRQ_HANDLED;
2398 }
2399
2400 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
2401 link_state_update(netdev_priv(net_dev));
2402
2403 return IRQ_HANDLED;
2404}
2405
2406static int setup_irqs(struct fsl_mc_device *ls_dev)
2407{
2408 int err = 0;
2409 struct fsl_mc_device_irq *irq;
2410
2411 err = fsl_mc_allocate_irqs(ls_dev);
2412 if (err) {
2413 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2414 return err;
2415 }
2416
2417 irq = ls_dev->irqs[0];
2418 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2419 NULL, dpni_irq0_handler_thread,
2420 IRQF_NO_SUSPEND | IRQF_ONESHOT,
2421 dev_name(&ls_dev->dev), &ls_dev->dev);
2422 if (err < 0) {
2423 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2424 goto free_mc_irq;
2425 }
2426
2427 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2428 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2429 if (err < 0) {
2430 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2431 goto free_irq;
2432 }
2433
2434 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2435 DPNI_IRQ_INDEX, 1);
2436 if (err < 0) {
2437 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2438 goto free_irq;
2439 }
2440
2441 return 0;
2442
2443free_irq:
2444 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2445free_mc_irq:
2446 fsl_mc_free_irqs(ls_dev);
2447
2448 return err;
2449}
2450
2451static void add_ch_napi(struct dpaa2_eth_priv *priv)
2452{
2453 int i;
2454 struct dpaa2_eth_channel *ch;
2455
2456 for (i = 0; i < priv->num_channels; i++) {
2457 ch = priv->channel[i];
2458
2459 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2460 NAPI_POLL_WEIGHT);
2461 }
2462}
2463
2464static void del_ch_napi(struct dpaa2_eth_priv *priv)
2465{
2466 int i;
2467 struct dpaa2_eth_channel *ch;
2468
2469 for (i = 0; i < priv->num_channels; i++) {
2470 ch = priv->channel[i];
2471 netif_napi_del(&ch->napi);
2472 }
2473}
2474
2475static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2476{
2477 struct device *dev;
2478 struct net_device *net_dev = NULL;
2479 struct dpaa2_eth_priv *priv = NULL;
2480 int err = 0;
2481
2482 dev = &dpni_dev->dev;
2483
2484
2485 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2486 if (!net_dev) {
2487 dev_err(dev, "alloc_etherdev_mq() failed\n");
2488 return -ENOMEM;
2489 }
2490
2491 SET_NETDEV_DEV(net_dev, dev);
2492 dev_set_drvdata(dev, net_dev);
2493
2494 priv = netdev_priv(net_dev);
2495 priv->net_dev = net_dev;
2496
2497 priv->iommu_domain = iommu_get_domain_for_dev(dev);
2498
2499
2500 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2501 &priv->mc_io);
2502 if (err) {
2503 if (err == -ENXIO)
2504 err = -EPROBE_DEFER;
2505 else
2506 dev_err(dev, "MC portal allocation failed\n");
2507 goto err_portal_alloc;
2508 }
2509
2510
2511 err = setup_dpni(dpni_dev);
2512 if (err)
2513 goto err_dpni_setup;
2514
2515 err = setup_dpio(priv);
2516 if (err)
2517 goto err_dpio_setup;
2518
2519 setup_fqs(priv);
2520
2521 err = setup_dpbp(priv);
2522 if (err)
2523 goto err_dpbp_setup;
2524
2525 err = bind_dpni(priv);
2526 if (err)
2527 goto err_bind;
2528
2529
2530 add_ch_napi(priv);
2531
2532
2533 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2534 if (!priv->percpu_stats) {
2535 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2536 err = -ENOMEM;
2537 goto err_alloc_percpu_stats;
2538 }
2539 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2540 if (!priv->percpu_extras) {
2541 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2542 err = -ENOMEM;
2543 goto err_alloc_percpu_extras;
2544 }
2545
2546 err = netdev_init(net_dev);
2547 if (err)
2548 goto err_netdev_init;
2549
2550
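/* Configure checksum offload based on the current netdev features */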
2551 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2552 if (err)
2553 goto err_csum;
2554
2555 err = set_tx_csum(priv, !!(net_dev->features &
2556 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2557 if (err)
2558 goto err_csum;
2559
2560 err = alloc_rings(priv);
2561 if (err)
2562 goto err_alloc_rings;
2563
2564 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2565
2566 err = setup_irqs(dpni_dev);
2567 if (err) {
2568 netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
2569 priv->poll_thread = kthread_run(poll_link_state, priv,
2570 "%s_poll_link", net_dev->name);
2571 if (IS_ERR(priv->poll_thread)) {
2572 netdev_err(net_dev, "Error starting polling thread\n");
2573 goto err_poll_thread;
2574 }
2575 priv->do_link_poll = true;
2576 }
2577
2578 dev_info(dev, "Probed interface %s\n", net_dev->name);
2579 return 0;
2580
2581err_poll_thread:
2582 free_rings(priv);
2583err_alloc_rings:
2584err_csum:
2585 unregister_netdev(net_dev);
2586err_netdev_init:
2587 free_percpu(priv->percpu_extras);
2588err_alloc_percpu_extras:
2589 free_percpu(priv->percpu_stats);
2590err_alloc_percpu_stats:
2591 del_ch_napi(priv);
2592err_bind:
2593 free_dpbp(priv);
2594err_dpbp_setup:
2595 free_dpio(priv);
2596err_dpio_setup:
2597 free_dpni(priv);
2598err_dpni_setup:
2599 fsl_mc_portal_free(priv->mc_io);
2600err_portal_alloc:
2601 dev_set_drvdata(dev, NULL);
2602 free_netdev(net_dev);
2603
2604 return err;
2605}
2606
2607static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2608{
2609 struct device *dev;
2610 struct net_device *net_dev;
2611 struct dpaa2_eth_priv *priv;
2612
2613 dev = &ls_dev->dev;
2614 net_dev = dev_get_drvdata(dev);
2615 priv = netdev_priv(net_dev);
2616
2617 unregister_netdev(net_dev);
2618
2619 if (priv->do_link_poll)
2620 kthread_stop(priv->poll_thread);
2621 else
2622 fsl_mc_free_irqs(ls_dev);
2623
2624 free_rings(priv);
2625 free_percpu(priv->percpu_stats);
2626 free_percpu(priv->percpu_extras);
2627
2628 del_ch_napi(priv);
2629 free_dpbp(priv);
2630 free_dpio(priv);
2631 free_dpni(priv);
2632
2633 fsl_mc_portal_free(priv->mc_io);
2634
2635 free_netdev(net_dev);
2636
2637 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2638
2639 return 0;
2640}
2641
2642static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2643 {
2644 .vendor = FSL_MC_VENDOR_FREESCALE,
2645 .obj_type = "dpni",
2646 },
2647 { .vendor = 0x0 }
2648};
2649MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2650
2651static struct fsl_mc_driver dpaa2_eth_driver = {
2652 .driver = {
2653 .name = KBUILD_MODNAME,
2654 .owner = THIS_MODULE,
2655 },
2656 .probe = dpaa2_eth_probe,
2657 .remove = dpaa2_eth_remove,
2658 .match_id_table = dpaa2_eth_match_id_table
2659};
2660
2661module_fsl_mc_driver(dpaa2_eth_driver);
2662