// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tso.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
 * using trace events defined in this file must see this definition first
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
37
static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
{
	priv->features = 0;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
				   DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
		priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
}
46
static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
					      u32 offset, u8 udp)
{
	struct dpni_single_step_cfg cfg;

	cfg.en = 1;
	cfg.ch_update = udp;
	cfg.offset = offset;
	cfg.peer_delay = 0;

	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
		WARN_ONCE(1, "Failed to set single step register");
}

static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
					    u32 offset, u8 udp)
{
	u32 val = 0;

	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
	      DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);

	if (udp)
		val |= DPAA2_PTP_SINGLE_STEP_CH;

	if (priv->onestep_reg_base)
		writel(val, priv->onestep_reg_base);
}
75
static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_single_step_cfg ptp_cfg;

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;

	if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
		return;

	if (dpni_get_single_step_cfg(priv->mc_io, 0,
				     priv->mc_token, &ptp_cfg)) {
		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
		return;
	}

	if (!ptp_cfg.ptp_onestep_reg_base) {
		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
		return;
	}

	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
					 sizeof(u32));
	if (!priv->onestep_reg_base) {
		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
		return;
	}

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
}
106
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* At this point we have a valid L3 & L4 checksum verified by hardware */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
135
136
137
138
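/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */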
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, unmap and free each SG entry buffer except the
	 * first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}
175
176
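/* Build a linear skb based on a single-buffer frame descriptor */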
177static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
178 const struct dpaa2_fd *fd,
179 void *fd_vaddr)
180{
181 struct sk_buff *skb = NULL;
182 u16 fd_offset = dpaa2_fd_get_offset(fd);
183 u32 fd_length = dpaa2_fd_get_len(fd);
184
185 ch->buf_count--;
186
187 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
188 if (unlikely(!skb))
189 return NULL;
190
191 skb_reserve(skb, fd_offset);
192 skb_put(skb, fd_length);
193
194 return skb;
195}
196
197
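/* Build a non linear (fragmented) skb based on a S/G table */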
198static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
199 struct dpaa2_eth_channel *ch,
200 struct dpaa2_sg_entry *sgt)
201{
202 struct sk_buff *skb = NULL;
203 struct device *dev = priv->net_dev->dev.parent;
204 void *sg_vaddr;
205 dma_addr_t sg_addr;
206 u16 sg_offset;
207 u32 sg_length;
208 struct page *page, *head_page;
209 int page_offset;
210 int i;
211
212 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
213 struct dpaa2_sg_entry *sge = &sgt[i];
214
215
216
217
218
219
220 sg_addr = dpaa2_sg_get_addr(sge);
221 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
222 dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
223 DMA_BIDIRECTIONAL);
224
225 sg_length = dpaa2_sg_get_len(sge);
226
227 if (i == 0) {
228
229 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
230 if (unlikely(!skb)) {
231
232
233
234 free_pages((unsigned long)sg_vaddr, 0);
235
236
237
238
239 while (!dpaa2_sg_is_final(&sgt[i]) &&
240 i < DPAA2_ETH_MAX_SG_ENTRIES)
241 i++;
242 break;
243 }
244
245 sg_offset = dpaa2_sg_get_offset(sge);
246 skb_reserve(skb, sg_offset);
247 skb_put(skb, sg_length);
248 } else {
249
250 page = virt_to_page(sg_vaddr);
251 head_page = virt_to_head_page(sg_vaddr);
252
253
254
255
256
257
258 page_offset = ((unsigned long)sg_vaddr &
259 (PAGE_SIZE - 1)) +
260 (page_address(page) - page_address(head_page));
261
262 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
263 sg_length, priv->rx_buf_size);
264 }
265
266 if (dpaa2_sg_is_final(sge))
267 break;
268 }
269
270 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
271
272
273 ch->buf_count -= i + 2;
274
275 return skb;
276}
277
278
279
280
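/* Free buffers acquired from the buffer pool or which were meant to be
 * released in the pool
 */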
281static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
282 int count)
283{
284 struct device *dev = priv->net_dev->dev.parent;
285 void *vaddr;
286 int i;
287
288 for (i = 0; i < count; i++) {
289 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
290 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
291 DMA_BIDIRECTIONAL);
292 free_pages((unsigned long)vaddr, 0);
293 }
294}
295
static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->recycled_bufs,
					       ch->recycled_bufs_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
		ch->buf_count -= ch->recycled_bufs_cnt;
	}

	ch->recycled_bufs_cnt = 0;
}
322
323static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
324 struct dpaa2_eth_fq *fq,
325 struct dpaa2_eth_xdp_fds *xdp_fds)
326{
327 int total_enqueued = 0, retries = 0, enqueued;
328 struct dpaa2_eth_drv_stats *percpu_extras;
329 int num_fds, err, max_retries;
330 struct dpaa2_fd *fds;
331
332 percpu_extras = this_cpu_ptr(priv->percpu_extras);
333
334
335 fds = xdp_fds->fds;
336 num_fds = xdp_fds->num;
337 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
338 while (total_enqueued < num_fds && retries < max_retries) {
339 err = priv->enqueue(priv, fq, &fds[total_enqueued],
340 0, num_fds - total_enqueued, &enqueued);
341 if (err == -EBUSY) {
342 percpu_extras->tx_portal_busy += ++retries;
343 continue;
344 }
345 total_enqueued += enqueued;
346 }
347 xdp_fds->num = 0;
348
349 return total_enqueued;
350}
351
352static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
353 struct dpaa2_eth_channel *ch,
354 struct dpaa2_eth_fq *fq)
355{
356 struct rtnl_link_stats64 *percpu_stats;
357 struct dpaa2_fd *fds;
358 int enqueued, i;
359
360 percpu_stats = this_cpu_ptr(priv->percpu_stats);
361
362
363 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
364
365
366 percpu_stats->tx_packets += enqueued;
367 fds = fq->xdp_tx_fds.fds;
368 for (i = 0; i < enqueued; i++) {
369 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
370 ch->stats.xdp_tx++;
371 }
372 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
373 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
374 percpu_stats->tx_errors++;
375 ch->stats.xdp_tx_err++;
376 }
377 fq->xdp_tx_fds.num = 0;
378}
379
380static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
381 struct dpaa2_eth_channel *ch,
382 struct dpaa2_fd *fd,
383 void *buf_start, u16 queue_id)
384{
385 struct dpaa2_faead *faead;
386 struct dpaa2_fd *dest_fd;
387 struct dpaa2_eth_fq *fq;
388 u32 ctrl, frc;
389
390
391 frc = dpaa2_fd_get_frc(fd);
392 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
393 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
394
395
396
397
398
399 ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
400 faead = dpaa2_get_faead(buf_start, false);
401 faead->ctrl = cpu_to_le32(ctrl);
402 faead->conf_fqid = 0;
403
404 fq = &priv->fq[queue_id];
405 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
406 memcpy(dest_fd, fd, sizeof(*dest_fd));
407
408 if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
409 return;
410
411 dpaa2_eth_xdp_tx_flush(priv, ch, fq);
412}
413
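/* Run the XDP program attached to the Rx queue on the received frame and
 * act on its verdict (pass, drop, tx or redirect)
 */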
414static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
415 struct dpaa2_eth_channel *ch,
416 struct dpaa2_eth_fq *rx_fq,
417 struct dpaa2_fd *fd, void *vaddr)
418{
419 dma_addr_t addr = dpaa2_fd_get_addr(fd);
420 struct bpf_prog *xdp_prog;
421 struct xdp_buff xdp;
422 u32 xdp_act = XDP_PASS;
423 int err, offset;
424
425 xdp_prog = READ_ONCE(ch->xdp.prog);
426 if (!xdp_prog)
427 goto out;
428
429 offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
430 xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
431 xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
432 dpaa2_fd_get_len(fd), false);
433
434 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
435
436
437 dpaa2_fd_set_offset(fd, xdp.data - vaddr);
438 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
439
440 switch (xdp_act) {
441 case XDP_PASS:
442 break;
443 case XDP_TX:
444 dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
445 break;
446 default:
447 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
448 fallthrough;
449 case XDP_ABORTED:
450 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
451 fallthrough;
452 case XDP_DROP:
453 dpaa2_eth_recycle_buf(priv, ch, addr);
454 ch->stats.xdp_drop++;
455 break;
456 case XDP_REDIRECT:
457 dma_unmap_page(priv->net_dev->dev.parent, addr,
458 priv->rx_buf_size, DMA_BIDIRECTIONAL);
459 ch->buf_count--;
460
461
462 xdp.data_hard_start = vaddr;
463 xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
464
465 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
466 if (unlikely(err)) {
467 addr = dma_map_page(priv->net_dev->dev.parent,
468 virt_to_page(vaddr), 0,
469 priv->rx_buf_size, DMA_BIDIRECTIONAL);
470 if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
471 free_pages((unsigned long)vaddr, 0);
472 } else {
473 ch->buf_count++;
474 dpaa2_eth_recycle_buf(priv, ch, addr);
475 }
476 ch->stats.xdp_drop++;
477 } else {
478 ch->stats.xdp_redirect++;
479 }
480 break;
481 }
482
483 ch->xdp.res |= xdp_act;
484out:
485 return xdp_act;
486}
487
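/* Rx copybreak: for small frames, copy the data into a freshly allocated
 * skb and recycle the original buffer instead of building an skb around it
 */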
static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	struct dpaa2_eth_priv *priv = ch->priv;
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	if (fd_length > priv->rx_copybreak)
		return NULL;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}
516
517
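/* Main Rx frame processing routine */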
518static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
519 struct dpaa2_eth_channel *ch,
520 const struct dpaa2_fd *fd,
521 struct dpaa2_eth_fq *fq)
522{
523 dma_addr_t addr = dpaa2_fd_get_addr(fd);
524 u8 fd_format = dpaa2_fd_get_format(fd);
525 void *vaddr;
526 struct sk_buff *skb;
527 struct rtnl_link_stats64 *percpu_stats;
528 struct dpaa2_eth_drv_stats *percpu_extras;
529 struct device *dev = priv->net_dev->dev.parent;
530 struct dpaa2_fas *fas;
531 void *buf_data;
532 u32 status = 0;
533 u32 xdp_act;
534
535
536 trace_dpaa2_rx_fd(priv->net_dev, fd);
537
538 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
539 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
540 DMA_BIDIRECTIONAL);
541
542 fas = dpaa2_get_fas(vaddr, false);
543 prefetch(fas);
544 buf_data = vaddr + dpaa2_fd_get_offset(fd);
545 prefetch(buf_data);
546
547 percpu_stats = this_cpu_ptr(priv->percpu_stats);
548 percpu_extras = this_cpu_ptr(priv->percpu_extras);
549
550 if (fd_format == dpaa2_fd_single) {
551 xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
552 if (xdp_act != XDP_PASS) {
553 percpu_stats->rx_packets++;
554 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
555 return;
556 }
557
558 skb = dpaa2_eth_copybreak(ch, fd, vaddr);
559 if (!skb) {
560 dma_unmap_page(dev, addr, priv->rx_buf_size,
561 DMA_BIDIRECTIONAL);
562 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
563 }
564 } else if (fd_format == dpaa2_fd_sg) {
565 WARN_ON(priv->xdp_prog);
566
567 dma_unmap_page(dev, addr, priv->rx_buf_size,
568 DMA_BIDIRECTIONAL);
569 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
570 free_pages((unsigned long)vaddr, 0);
571 percpu_extras->rx_sg_frames++;
572 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
573 } else {
574
575 goto err_frame_format;
576 }
577
578 if (unlikely(!skb))
579 goto err_build_skb;
580
581 prefetch(skb->data);
582
583
584 if (priv->rx_tstamp) {
585 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
586 __le64 *ts = dpaa2_get_ts(vaddr, false);
587 u64 ns;
588
589 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
590
591 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
592 shhwtstamps->hwtstamp = ns_to_ktime(ns);
593 }
594
595
596 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
597 status = le32_to_cpu(fas->status);
598 dpaa2_eth_validate_rx_csum(priv, status, skb);
599 }
600
601 skb->protocol = eth_type_trans(skb, priv->net_dev);
602 skb_record_rx_queue(skb, fq->flowid);
603
604 percpu_stats->rx_packets++;
605 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
606 ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
607
608 list_add_tail(&skb->list, ch->rx_list);
609
610 return;
611
612err_build_skb:
613 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
614err_frame_format:
615 percpu_stats->rx_dropped++;
616}
617
618
619
620
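/* Processing of Rx frames received on the error FQ: report the frame via
 * devlink traps (if any matches) and then free it
 */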
621static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
622 struct dpaa2_eth_channel *ch,
623 const struct dpaa2_fd *fd,
624 struct dpaa2_eth_fq *fq __always_unused)
625{
626 struct device *dev = priv->net_dev->dev.parent;
627 dma_addr_t addr = dpaa2_fd_get_addr(fd);
628 u8 fd_format = dpaa2_fd_get_format(fd);
629 struct rtnl_link_stats64 *percpu_stats;
630 struct dpaa2_eth_trap_item *trap_item;
631 struct dpaa2_fapr *fapr;
632 struct sk_buff *skb;
633 void *buf_data;
634 void *vaddr;
635
636 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
637 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
638 DMA_BIDIRECTIONAL);
639
640 buf_data = vaddr + dpaa2_fd_get_offset(fd);
641
642 if (fd_format == dpaa2_fd_single) {
643 dma_unmap_page(dev, addr, priv->rx_buf_size,
644 DMA_BIDIRECTIONAL);
645 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
646 } else if (fd_format == dpaa2_fd_sg) {
647 dma_unmap_page(dev, addr, priv->rx_buf_size,
648 DMA_BIDIRECTIONAL);
649 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
650 free_pages((unsigned long)vaddr, 0);
651 } else {
652
653 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
654 goto err_frame_format;
655 }
656
657 fapr = dpaa2_get_fapr(vaddr, false);
658 trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
659 if (trap_item)
660 devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
661 &priv->devlink_port, NULL);
662 consume_skb(skb);
663
664err_frame_format:
665 percpu_stats = this_cpu_ptr(priv->percpu_stats);
666 percpu_stats->rx_errors++;
667 ch->buf_count--;
668}
669
670
671
672
673
674
675
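/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */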
676static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
677 struct dpaa2_eth_fq **src)
678{
679 struct dpaa2_eth_priv *priv = ch->priv;
680 struct dpaa2_eth_fq *fq = NULL;
681 struct dpaa2_dq *dq;
682 const struct dpaa2_fd *fd;
683 int cleaned = 0, retries = 0;
684 int is_last;
685
686 do {
687 dq = dpaa2_io_store_next(ch->store, &is_last);
688 if (unlikely(!dq)) {
689
690
691
692
693
694 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
695 netdev_err_once(priv->net_dev,
696 "Unable to read a valid dequeue response\n");
697 return -ETIMEDOUT;
698 }
699 continue;
700 }
701
702 fd = dpaa2_dq_fd(dq);
703 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
704
705 fq->consume(priv, ch, fd, fq);
706 cleaned++;
707 retries = 0;
708 } while (!is_last);
709
710 if (!cleaned)
711 return 0;
712
713 fq->stats.frames += cleaned;
714 ch->stats.frames += cleaned;
715 ch->stats.frames_per_cdan += cleaned;
716
717
718
719
720 if (src)
721 *src = fq;
722
723 return cleaned;
724}
725
static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}
760
761
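/* Configure the egress frame annotation so the hardware timestamps the frame;
 * for one-step Sync frames, also write the origin timestamp into the packet
 * and program the correction field offset
 */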
762static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
763 struct dpaa2_fd *fd,
764 void *buf_start,
765 struct sk_buff *skb)
766{
767 struct ptp_tstamp origin_timestamp;
768 u8 msgtype, twostep, udp;
769 struct dpaa2_faead *faead;
770 struct dpaa2_fas *fas;
771 struct timespec64 ts;
772 u16 offset1, offset2;
773 u32 ctrl, frc;
774 __le64 *ns;
775 u8 *data;
776
777
778 frc = dpaa2_fd_get_frc(fd);
779 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
780
781
782 ctrl = dpaa2_fd_get_ctrl(fd);
783 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
784
785
786
787
788 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
789 faead = dpaa2_get_faead(buf_start, true);
790 faead->ctrl = cpu_to_le32(ctrl);
791
792 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
793 if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
794 &offset1, &offset2) ||
795 msgtype != PTP_MSGTYPE_SYNC || twostep) {
796 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
797 return;
798 }
799
800
801 frc = dpaa2_fd_get_frc(fd);
802 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
803
804
805 fas = dpaa2_get_fas(buf_start, true);
806 fas->status = cpu_to_le32(DPAA2_FAS_PTP);
807
808 dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
809 ns = dpaa2_get_ts(buf_start, true);
810 *ns = cpu_to_le64(timespec64_to_ns(&ts) /
811 DPAA2_PTP_CLK_PERIOD_NS);
812
813
814 ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
815 data = skb_mac_header(skb);
816 *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
817 *(__be32 *)(data + offset2 + 2) =
818 htonl(origin_timestamp.sec_lsb);
819 *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
820
821 if (priv->ptp_correction_off == offset1)
822 return;
823
824 priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
825 priv->ptp_correction_off = offset1;
826
827 }
828}
829
static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	void *sgt_buf = NULL;
	int sgt_buf_size;

	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset +
		DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (!sgt_buf)
		return NULL;

	memset(sgt_buf, 0, sgt_buf_size);

	return sgt_buf;
}

static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;

	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		skb_free_frag(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;
}
862
863
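/* Create a frame descriptor based on a fragmented skb */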
864static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
865 struct sk_buff *skb,
866 struct dpaa2_fd *fd,
867 void **swa_addr)
868{
869 struct device *dev = priv->net_dev->dev.parent;
870 void *sgt_buf = NULL;
871 dma_addr_t addr;
872 int nr_frags = skb_shinfo(skb)->nr_frags;
873 struct dpaa2_sg_entry *sgt;
874 int i, err;
875 int sgt_buf_size;
876 struct scatterlist *scl, *crt_scl;
877 int num_sg;
878 int num_dma_bufs;
879 struct dpaa2_eth_swa *swa;
880
881
882
883
884
885
886 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
887 return -EINVAL;
888
889 scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
890 if (unlikely(!scl))
891 return -ENOMEM;
892
893 sg_init_table(scl, nr_frags + 1);
894 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
895 if (unlikely(num_sg < 0)) {
896 err = -ENOMEM;
897 goto dma_map_sg_failed;
898 }
899 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
900 if (unlikely(!num_dma_bufs)) {
901 err = -ENOMEM;
902 goto dma_map_sg_failed;
903 }
904
905
906 sgt_buf_size = priv->tx_data_offset +
907 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
908 sgt_buf = dpaa2_eth_sgt_get(priv);
909 if (unlikely(!sgt_buf)) {
910 err = -ENOMEM;
911 goto sgt_buf_alloc_failed;
912 }
913
914 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
915
916
917
918
919
920
921
922
923 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
924 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
925 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
926 }
927 dpaa2_sg_set_final(&sgt[i - 1], true);
928
929
930
931
932
933
934 *swa_addr = (void *)sgt_buf;
935 swa = (struct dpaa2_eth_swa *)sgt_buf;
936 swa->type = DPAA2_ETH_SWA_SG;
937 swa->sg.skb = skb;
938 swa->sg.scl = scl;
939 swa->sg.num_sg = num_sg;
940 swa->sg.sgt_size = sgt_buf_size;
941
942
943 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
944 if (unlikely(dma_mapping_error(dev, addr))) {
945 err = -ENOMEM;
946 goto dma_map_single_failed;
947 }
948 memset(fd, 0, sizeof(struct dpaa2_fd));
949 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
950 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
951 dpaa2_fd_set_addr(fd, addr);
952 dpaa2_fd_set_len(fd, skb->len);
953 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
954
955 return 0;
956
957dma_map_single_failed:
958 dpaa2_eth_sgt_recycle(priv, sgt_buf);
959sgt_buf_alloc_failed:
960 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
961dma_map_sg_failed:
962 kfree(scl);
963 return err;
964}
965
966
967
968
969
970
971
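/* Create a S/G frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a S/G frame descriptor with only one entry.
 */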
972static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
973 struct sk_buff *skb,
974 struct dpaa2_fd *fd,
975 void **swa_addr)
976{
977 struct device *dev = priv->net_dev->dev.parent;
978 struct dpaa2_sg_entry *sgt;
979 struct dpaa2_eth_swa *swa;
980 dma_addr_t addr, sgt_addr;
981 void *sgt_buf = NULL;
982 int sgt_buf_size;
983 int err;
984
985
986 sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
987 sgt_buf = dpaa2_eth_sgt_get(priv);
988 if (unlikely(!sgt_buf))
989 return -ENOMEM;
990 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
991
992 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
993 if (unlikely(dma_mapping_error(dev, addr))) {
994 err = -ENOMEM;
995 goto data_map_failed;
996 }
997
998
999 dpaa2_sg_set_addr(sgt, addr);
1000 dpaa2_sg_set_len(sgt, skb->len);
1001 dpaa2_sg_set_final(sgt, true);
1002
1003
1004 *swa_addr = (void *)sgt_buf;
1005 swa = (struct dpaa2_eth_swa *)sgt_buf;
1006 swa->type = DPAA2_ETH_SWA_SINGLE;
1007 swa->single.skb = skb;
1008 swa->single.sgt_size = sgt_buf_size;
1009
1010
1011 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1012 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1013 err = -ENOMEM;
1014 goto sgt_map_failed;
1015 }
1016
1017 memset(fd, 0, sizeof(struct dpaa2_fd));
1018 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1019 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1020 dpaa2_fd_set_addr(fd, sgt_addr);
1021 dpaa2_fd_set_len(fd, skb->len);
1022 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1023
1024 return 0;
1025
1026sgt_map_failed:
1027 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1028data_map_failed:
1029 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1030
1031 return err;
1032}
1033
1034
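/* Create a frame descriptor based on a linear skb */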
1035static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
1036 struct sk_buff *skb,
1037 struct dpaa2_fd *fd,
1038 void **swa_addr)
1039{
1040 struct device *dev = priv->net_dev->dev.parent;
1041 u8 *buffer_start, *aligned_start;
1042 struct dpaa2_eth_swa *swa;
1043 dma_addr_t addr;
1044
1045 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
1046
1047
1048
1049
1050 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1051 DPAA2_ETH_TX_BUF_ALIGN);
1052 if (aligned_start >= skb->head)
1053 buffer_start = aligned_start;
1054
1055
1056
1057
1058
1059 *swa_addr = (void *)buffer_start;
1060 swa = (struct dpaa2_eth_swa *)buffer_start;
1061 swa->type = DPAA2_ETH_SWA_SINGLE;
1062 swa->single.skb = skb;
1063
1064 addr = dma_map_single(dev, buffer_start,
1065 skb_tail_pointer(skb) - buffer_start,
1066 DMA_BIDIRECTIONAL);
1067 if (unlikely(dma_mapping_error(dev, addr)))
1068 return -ENOMEM;
1069
1070 memset(fd, 0, sizeof(struct dpaa2_fd));
1071 dpaa2_fd_set_addr(fd, addr);
1072 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1073 dpaa2_fd_set_len(fd, skb->len);
1074 dpaa2_fd_set_format(fd, dpaa2_fd_single);
1075 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1076
1077 return 0;
1078}
1079
1080
1081
1082
1083
1084
1085
1086
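/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */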
1087static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
1088 struct dpaa2_eth_fq *fq,
1089 const struct dpaa2_fd *fd, bool in_napi)
1090{
1091 struct device *dev = priv->net_dev->dev.parent;
1092 dma_addr_t fd_addr, sg_addr;
1093 struct sk_buff *skb = NULL;
1094 unsigned char *buffer_start;
1095 struct dpaa2_eth_swa *swa;
1096 u8 fd_format = dpaa2_fd_get_format(fd);
1097 u32 fd_len = dpaa2_fd_get_len(fd);
1098 struct dpaa2_sg_entry *sgt;
1099 int should_free_skb = 1;
1100 int i;
1101
1102 fd_addr = dpaa2_fd_get_addr(fd);
1103 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1104 swa = (struct dpaa2_eth_swa *)buffer_start;
1105
1106 if (fd_format == dpaa2_fd_single) {
1107 if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1108 skb = swa->single.skb;
1109
1110
1111
1112 dma_unmap_single(dev, fd_addr,
1113 skb_tail_pointer(skb) - buffer_start,
1114 DMA_BIDIRECTIONAL);
1115 } else {
1116 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1117 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1118 DMA_BIDIRECTIONAL);
1119 }
1120 } else if (fd_format == dpaa2_fd_sg) {
1121 if (swa->type == DPAA2_ETH_SWA_SG) {
1122 skb = swa->sg.skb;
1123
1124
1125 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1126 DMA_BIDIRECTIONAL);
1127 kfree(swa->sg.scl);
1128
1129
1130 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1131 DMA_BIDIRECTIONAL);
1132 } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
1133 skb = swa->tso.skb;
1134
1135 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1136 priv->tx_data_offset);
1137
1138
1139 dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1140 DMA_TO_DEVICE);
1141 kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)));
1142
1143
1144 for (i = 1; i < swa->tso.num_sg; i++)
1145 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1146 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1147
1148
1149 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1150 DMA_BIDIRECTIONAL);
1151
1152 if (!swa->tso.is_last_fd)
1153 should_free_skb = 0;
1154 } else {
1155 skb = swa->single.skb;
1156
1157
1158 dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1159 DMA_BIDIRECTIONAL);
1160
1161 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1162 priv->tx_data_offset);
1163 sg_addr = dpaa2_sg_get_addr(sgt);
1164 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1165 }
1166 } else {
1167 netdev_dbg(priv->net_dev, "Invalid FD format\n");
1168 return;
1169 }
1170
1171 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1172 fq->dq_frames++;
1173 fq->dq_bytes += fd_len;
1174 }
1175
1176 if (swa->type == DPAA2_ETH_SWA_XDP) {
1177 xdp_return_frame(swa->xdp.xdpf);
1178 return;
1179 }
1180
1181
1182 if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
1183 if (skb->cb[0] == TX_TSTAMP) {
1184 struct skb_shared_hwtstamps shhwtstamps;
1185 __le64 *ts = dpaa2_get_ts(buffer_start, true);
1186 u64 ns;
1187
1188 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1189
1190 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1191 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1192 skb_tstamp_tx(skb, &shhwtstamps);
1193 } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1194 mutex_unlock(&priv->onestep_tstamp_lock);
1195 }
1196 }
1197
1198
1199 if (fd_format != dpaa2_fd_single)
1200 dpaa2_eth_sgt_recycle(priv, buffer_start);
1201
1202
1203
1204
1205
1206 if (should_free_skb)
1207 napi_consume_skb(skb, in_napi);
1208}
1209
1210static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
1211 struct sk_buff *skb, struct dpaa2_fd *fd,
1212 int *num_fds, u32 *total_fds_len)
1213{
1214 struct device *dev = priv->net_dev->dev.parent;
1215 int hdr_len, total_len, data_left, fd_len;
1216 int num_sge, err, i, sgt_buf_size;
1217 struct dpaa2_fd *fd_start = fd;
1218 struct dpaa2_sg_entry *sgt;
1219 struct dpaa2_eth_swa *swa;
1220 dma_addr_t sgt_addr, addr;
1221 dma_addr_t tso_hdr_dma;
1222 unsigned int index = 0;
1223 struct tso_t tso;
1224 char *tso_hdr;
1225 void *sgt_buf;
1226
1227
1228 hdr_len = tso_start(skb, &tso);
1229 *total_fds_len = 0;
1230
1231 total_len = skb->len - hdr_len;
1232 while (total_len > 0) {
1233
1234 sgt_buf = dpaa2_eth_sgt_get(priv);
1235 if (unlikely(!sgt_buf)) {
1236 netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
1237 err = -ENOMEM;
1238 goto err_sgt_get;
1239 }
1240 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1241
1242
1243 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1244 total_len -= data_left;
1245 fd_len = data_left + hdr_len;
1246
1247
1248 tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
1249 if (!tso_hdr) {
1250 err = -ENOMEM;
1251 goto err_alloc_tso_hdr;
1252 }
1253
1254 tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
1255 tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1256 if (dma_mapping_error(dev, tso_hdr_dma)) {
1257 netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
1258 err = -ENOMEM;
1259 goto err_map_tso_hdr;
1260 }
1261
1262
1263 dpaa2_sg_set_addr(sgt, tso_hdr_dma);
1264 dpaa2_sg_set_len(sgt, hdr_len);
1265 dpaa2_sg_set_final(sgt, data_left <= 0);
1266
1267
1268 num_sge = 1;
1269 while (data_left > 0) {
1270 int size;
1271
1272
1273 sgt++;
1274 size = min_t(int, tso.size, data_left);
1275
1276 addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1277 if (dma_mapping_error(dev, addr)) {
1278 netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
1279 err = -ENOMEM;
1280 goto err_map_data;
1281 }
1282 dpaa2_sg_set_addr(sgt, addr);
1283 dpaa2_sg_set_len(sgt, size);
1284 dpaa2_sg_set_final(sgt, size == data_left);
1285
1286 num_sge++;
1287
1288
1289 data_left -= size;
1290 tso_build_data(skb, &tso, size);
1291 }
1292
1293
1294 sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
1295 swa = (struct dpaa2_eth_swa *)sgt_buf;
1296 swa->type = DPAA2_ETH_SWA_SW_TSO;
1297 swa->tso.skb = skb;
1298 swa->tso.num_sg = num_sge;
1299 swa->tso.sgt_size = sgt_buf_size;
1300 swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
1301
1302
1303 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1304 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1305 netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
1306 err = -ENOMEM;
1307 goto err_map_sgt;
1308 }
1309
1310
1311 memset(fd, 0, sizeof(struct dpaa2_fd));
1312 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1313 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1314 dpaa2_fd_set_addr(fd, sgt_addr);
1315 dpaa2_fd_set_len(fd, fd_len);
1316 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1317
1318 *total_fds_len += fd_len;
1319
1320 fd++;
1321 index++;
1322 }
1323
1324 *num_fds = index;
1325
1326 return 0;
1327
1328err_map_sgt:
1329err_map_data:
1330
1331 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1332 for (i = 1; i < num_sge; i++)
1333 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1334 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1335
1336
1337 dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1338err_map_tso_hdr:
1339 kfree(tso_hdr);
1340err_alloc_tso_hdr:
1341 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1342err_sgt_get:
1343
1344 for (i = 0; i < index; i++)
1345 dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
1346
1347 return err;
1348}
1349
1350static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1351 struct net_device *net_dev)
1352{
1353 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1354 int total_enqueued = 0, retries = 0, enqueued;
1355 struct dpaa2_eth_drv_stats *percpu_extras;
1356 struct rtnl_link_stats64 *percpu_stats;
1357 unsigned int needed_headroom;
1358 int num_fds = 1, max_retries;
1359 struct dpaa2_eth_fq *fq;
1360 struct netdev_queue *nq;
1361 struct dpaa2_fd *fd;
1362 u16 queue_mapping;
1363 void *swa = NULL;
1364 u8 prio = 0;
1365 int err, i;
1366 u32 fd_len;
1367
1368 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1369 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1370 fd = (this_cpu_ptr(priv->fd))->array;
1371
1372 needed_headroom = dpaa2_eth_needed_headroom(skb);
1373
1374
1375
1376
1377 skb = skb_unshare(skb, GFP_ATOMIC);
1378 if (unlikely(!skb)) {
1379
1380 percpu_stats->tx_dropped++;
1381 return NETDEV_TX_OK;
1382 }
1383
1384
1385
1386 if (skb_is_gso(skb)) {
1387 err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
1388 percpu_extras->tx_sg_frames += num_fds;
1389 percpu_extras->tx_sg_bytes += fd_len;
1390 percpu_extras->tx_tso_frames += num_fds;
1391 percpu_extras->tx_tso_bytes += fd_len;
1392 } else if (skb_is_nonlinear(skb)) {
1393 err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
1394 percpu_extras->tx_sg_frames++;
1395 percpu_extras->tx_sg_bytes += skb->len;
1396 fd_len = dpaa2_fd_get_len(fd);
1397 } else if (skb_headroom(skb) < needed_headroom) {
1398 err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
1399 percpu_extras->tx_sg_frames++;
1400 percpu_extras->tx_sg_bytes += skb->len;
1401 percpu_extras->tx_converted_sg_frames++;
1402 percpu_extras->tx_converted_sg_bytes += skb->len;
1403 fd_len = dpaa2_fd_get_len(fd);
1404 } else {
1405 err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
1406 fd_len = dpaa2_fd_get_len(fd);
1407 }
1408
1409 if (unlikely(err)) {
1410 percpu_stats->tx_dropped++;
1411 goto err_build_fd;
1412 }
1413
1414 if (swa && skb->cb[0])
1415 dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
1416
1417
1418 for (i = 0; i < num_fds; i++)
1419 trace_dpaa2_tx_fd(net_dev, &fd[i]);
1420
1421
1422
1423
1424
1425 queue_mapping = skb_get_queue_mapping(skb);
1426
1427 if (net_dev->num_tc) {
1428 prio = netdev_txq_to_tc(net_dev, queue_mapping);
1429
1430
1431
1432 prio = net_dev->num_tc - prio - 1;
1433
1434
1435
1436 queue_mapping %= dpaa2_eth_queue_count(priv);
1437 }
1438 fq = &priv->fq[queue_mapping];
1439 nq = netdev_get_tx_queue(net_dev, queue_mapping);
1440 netdev_tx_sent_queue(nq, fd_len);
1441
1442
1443
1444
1445 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
1446 while (total_enqueued < num_fds && retries < max_retries) {
1447 err = priv->enqueue(priv, fq, &fd[total_enqueued],
1448 prio, num_fds - total_enqueued, &enqueued);
1449 if (err == -EBUSY) {
1450 retries++;
1451 continue;
1452 }
1453
1454 total_enqueued += enqueued;
1455 }
1456 percpu_extras->tx_portal_busy += retries;
1457
1458 if (unlikely(err < 0)) {
1459 percpu_stats->tx_errors++;
1460
1461 dpaa2_eth_free_tx_fd(priv, fq, fd, false);
1462 netdev_tx_completed_queue(nq, 1, fd_len);
1463 } else {
1464 percpu_stats->tx_packets += total_enqueued;
1465 percpu_stats->tx_bytes += fd_len;
1466 }
1467
1468 return NETDEV_TX_OK;
1469
1470err_build_fd:
1471 dev_kfree_skb(skb);
1472
1473 return NETDEV_TX_OK;
1474}
1475
1476static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1477{
1478 struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1479 tx_onestep_tstamp);
1480 struct sk_buff *skb;
1481
1482 while (true) {
1483 skb = skb_dequeue(&priv->tx_skbs);
1484 if (!skb)
1485 return;
1486
1487
1488
1489
1490
1491
1492 mutex_lock(&priv->onestep_tstamp_lock);
1493 __dpaa2_eth_tx(skb, priv->net_dev);
1494 }
1495}
1496
1497static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1498{
1499 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1500 u8 msgtype, twostep, udp;
1501 u16 offset1, offset2;
1502
1503
1504 skb->cb[0] = 0;
1505
1506 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1507 if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1508 skb->cb[0] = TX_TSTAMP;
1509 else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1510 skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1511 }
1512
1513
1514 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1515 if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1516 &offset1, &offset2))
1517 if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1518 skb_queue_tail(&priv->tx_skbs, skb);
1519 queue_work(priv->dpaa2_ptp_wq,
1520 &priv->tx_onestep_tstamp);
1521 return NETDEV_TX_OK;
1522 }
1523
1524
1525
1526 skb->cb[0] = TX_TSTAMP;
1527 }
1528
1529
1530 return __dpaa2_eth_tx(skb, net_dev);
1531}
1532
1533
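/* Tx confirmation frame processing routine */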
1534static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1535 struct dpaa2_eth_channel *ch,
1536 const struct dpaa2_fd *fd,
1537 struct dpaa2_eth_fq *fq)
1538{
1539 struct rtnl_link_stats64 *percpu_stats;
1540 struct dpaa2_eth_drv_stats *percpu_extras;
1541 u32 fd_len = dpaa2_fd_get_len(fd);
1542 u32 fd_errors;
1543
1544
1545 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1546
1547 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1548 percpu_extras->tx_conf_frames++;
1549 percpu_extras->tx_conf_bytes += fd_len;
1550 ch->stats.bytes_per_cdan += fd_len;
1551
1552
1553 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1554 dpaa2_eth_free_tx_fd(priv, fq, fd, true);
1555
1556 if (likely(!fd_errors))
1557 return;
1558
1559 if (net_ratelimit())
1560 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1561 fd_errors);
1562
1563 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1564
1565 percpu_stats->tx_errors++;
1566}
1567
1568static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1569 bool enable)
1570{
1571 int err;
1572
1573 err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1574
1575 if (err) {
1576 netdev_err(priv->net_dev,
1577 "dpni_enable_vlan_filter failed\n");
1578 return err;
1579 }
1580
1581 return 0;
1582}
1583
1584static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1585{
1586 int err;
1587
1588 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1589 DPNI_OFF_RX_L3_CSUM, enable);
1590 if (err) {
1591 netdev_err(priv->net_dev,
1592 "dpni_set_offload(RX_L3_CSUM) failed\n");
1593 return err;
1594 }
1595
1596 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1597 DPNI_OFF_RX_L4_CSUM, enable);
1598 if (err) {
1599 netdev_err(priv->net_dev,
1600 "dpni_set_offload(RX_L4_CSUM) failed\n");
1601 return err;
1602 }
1603
1604 return 0;
1605}
1606
1607static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1608{
1609 int err;
1610
1611 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1612 DPNI_OFF_TX_L3_CSUM, enable);
1613 if (err) {
1614 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1615 return err;
1616 }
1617
1618 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1619 DPNI_OFF_TX_L4_CSUM, enable);
1620 if (err) {
1621 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1622 return err;
1623 }
1624
1625 return 0;
1626}
1627
1628
1629
1630
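/* Perform a single release command to add buffers
 * to the specified buffer pool
 */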
1631static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1632 struct dpaa2_eth_channel *ch, u16 bpid)
1633{
1634 struct device *dev = priv->net_dev->dev.parent;
1635 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1636 struct page *page;
1637 dma_addr_t addr;
1638 int retries = 0;
1639 int i, err;
1640
1641 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1642
1643
1644
1645
1646
1647
1648
1649 page = dev_alloc_pages(0);
1650 if (!page)
1651 goto err_alloc;
1652
1653 addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1654 DMA_BIDIRECTIONAL);
1655 if (unlikely(dma_mapping_error(dev, addr)))
1656 goto err_map;
1657
1658 buf_array[i] = addr;
1659
1660
1661 trace_dpaa2_eth_buf_seed(priv->net_dev,
1662 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
1663 addr, priv->rx_buf_size,
1664 bpid);
1665 }
1666
1667release_bufs:
1668
1669 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
1670 buf_array, i)) == -EBUSY) {
1671 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1672 break;
1673 cpu_relax();
1674 }
1675
1676
1677
1678
1679 if (err) {
1680 dpaa2_eth_free_bufs(priv, buf_array, i);
1681 return 0;
1682 }
1683
1684 return i;
1685
1686err_map:
1687 __free_pages(page, 0);
1688err_alloc:
1689
1690
1691
1692 if (i)
1693 goto release_bufs;
1694
1695 return 0;
1696}
1697
1698static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1699{
1700 int i, j;
1701 int new_count;
1702
1703 for (j = 0; j < priv->num_channels; j++) {
1704 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1705 i += DPAA2_ETH_BUFS_PER_CMD) {
1706 new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
1707 priv->channel[j]->buf_count += new_count;
1708
1709 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1710 return -ENOMEM;
1711 }
1712 }
1713 }
1714
1715 return 0;
1716}
1717
1718
1719
1720
1721
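/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */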
1722static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
1723{
1724 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1725 int retries = 0;
1726 int ret;
1727
1728 do {
1729 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1730 buf_array, count);
1731 if (ret < 0) {
1732 if (ret == -EBUSY &&
1733 retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1734 continue;
1735 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1736 return;
1737 }
1738 dpaa2_eth_free_bufs(priv, buf_array, ret);
1739 retries = 0;
1740 } while (ret);
1741}
1742
1743static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
1744{
1745 int i;
1746
1747 dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1748 dpaa2_eth_drain_bufs(priv, 1);
1749
1750 for (i = 0; i < priv->num_channels; i++)
1751 priv->channel[i]->buf_count = 0;
1752}
1753
1754
1755
1756
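/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */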
1757static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1758 struct dpaa2_eth_channel *ch,
1759 u16 bpid)
1760{
1761 int new_count;
1762
1763 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1764 return 0;
1765
1766 do {
1767 new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
1768 if (unlikely(!new_count)) {
1769
1770 break;
1771 }
1772 ch->buf_count += new_count;
1773 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1774
1775 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1776 return -ENOMEM;
1777
1778 return 0;
1779}
1780
1781static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1782{
1783 struct dpaa2_eth_sgt_cache *sgt_cache;
1784 u16 count;
1785 int k, i;
1786
1787 for_each_possible_cpu(k) {
1788 sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1789 count = sgt_cache->count;
1790
1791 for (i = 0; i < count; i++)
1792 skb_free_frag(sgt_cache->buf[i]);
1793 sgt_cache->count = 0;
1794 }
1795}
1796
1797static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1798{
1799 int err;
1800 int dequeues = -1;
1801
1802
1803 do {
1804 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1805 ch->store);
1806 dequeues++;
1807 cpu_relax();
1808 } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1809
1810 ch->stats.dequeue_portal_busy += dequeues;
1811 if (unlikely(err))
1812 ch->stats.pull_err++;
1813
1814 return err;
1815}
1816
1817
1818
1819
1820
1821
1822
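/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */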
1823static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1824{
1825 struct dpaa2_eth_channel *ch;
1826 struct dpaa2_eth_priv *priv;
1827 int rx_cleaned = 0, txconf_cleaned = 0;
1828 struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1829 struct netdev_queue *nq;
1830 int store_cleaned, work_done;
1831 struct list_head rx_list;
1832 int retries = 0;
1833 u16 flowid;
1834 int err;
1835
1836 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1837 ch->xdp.res = 0;
1838 priv = ch->priv;
1839
1840 INIT_LIST_HEAD(&rx_list);
1841 ch->rx_list = &rx_list;
1842
1843 do {
1844 err = dpaa2_eth_pull_channel(ch);
1845 if (unlikely(err))
1846 break;
1847
1848
1849 dpaa2_eth_refill_pool(priv, ch, priv->bpid);
1850
1851 store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1852 if (store_cleaned <= 0)
1853 break;
1854 if (fq->type == DPAA2_RX_FQ) {
1855 rx_cleaned += store_cleaned;
1856 flowid = fq->flowid;
1857 } else {
1858 txconf_cleaned += store_cleaned;
1859
1860 txc_fq = fq;
1861 }
1862
1863
1864
1865
1866 if (rx_cleaned >= budget ||
1867 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1868 work_done = budget;
1869 goto out;
1870 }
1871 } while (store_cleaned);
1872
1873
1874 dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
1875 ch->stats.bytes_per_cdan);
1876 ch->stats.frames_per_cdan = 0;
1877 ch->stats.bytes_per_cdan = 0;
1878
1879
1880
1881
1882 napi_complete_done(napi, rx_cleaned);
1883 do {
1884 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1885 cpu_relax();
1886 } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1887 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1888 ch->nctx.desired_cpu);
1889
1890 work_done = max(rx_cleaned, 1);
1891
1892out:
1893 netif_receive_skb_list(ch->rx_list);
1894
1895 if (txc_fq && txc_fq->dq_frames) {
1896 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1897 netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1898 txc_fq->dq_bytes);
1899 txc_fq->dq_frames = 0;
1900 txc_fq->dq_bytes = 0;
1901 }
1902
1903 if (ch->xdp.res & XDP_REDIRECT)
1904 xdp_do_flush_map();
1905 else if (rx_cleaned && ch->xdp.res & XDP_TX)
1906 dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
1907
1908 return work_done;
1909}
1910
1911static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
1912{
1913 struct dpaa2_eth_channel *ch;
1914 int i;
1915
1916 for (i = 0; i < priv->num_channels; i++) {
1917 ch = priv->channel[i];
1918 napi_enable(&ch->napi);
1919 }
1920}
1921
1922static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
1923{
1924 struct dpaa2_eth_channel *ch;
1925 int i;
1926
1927 for (i = 0; i < priv->num_channels; i++) {
1928 ch = priv->channel[i];
1929 napi_disable(&ch->napi);
1930 }
1931}
1932
1933void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
1934 bool tx_pause, bool pfc)
1935{
1936 struct dpni_taildrop td = {0};
1937 struct dpaa2_eth_fq *fq;
1938 int i, err;
1939
1940
1941
1942
1943
1944
1945 td.enable = !tx_pause;
1946 if (priv->rx_fqtd_enabled == td.enable)
1947 goto set_cgtd;
1948
1949 td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
1950 td.units = DPNI_CONGESTION_UNIT_BYTES;
1951
1952 for (i = 0; i < priv->num_fqs; i++) {
1953 fq = &priv->fq[i];
1954 if (fq->type != DPAA2_RX_FQ)
1955 continue;
1956 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1957 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
1958 fq->tc, fq->flowid, &td);
1959 if (err) {
1960 netdev_err(priv->net_dev,
1961 "dpni_set_taildrop(FQ) failed\n");
1962 return;
1963 }
1964 }
1965
1966 priv->rx_fqtd_enabled = td.enable;
1967
1968set_cgtd:
1969
1970
1971
1972
1973
1974
1975
1976 td.enable = !tx_pause || pfc;
1977 if (priv->rx_cgtd_enabled == td.enable)
1978 return;
1979
1980 td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
1981 td.units = DPNI_CONGESTION_UNIT_FRAMES;
1982 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
1983 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1984 DPNI_CP_GROUP, DPNI_QUEUE_RX,
1985 i, 0, &td);
1986 if (err) {
1987 netdev_err(priv->net_dev,
1988 "dpni_set_taildrop(CG) failed\n");
1989 return;
1990 }
1991 }
1992
1993 priv->rx_cgtd_enabled = td.enable;
1994}
1995
1996static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
1997{
1998 struct dpni_link_state state = {0};
1999 bool tx_pause;
2000 int err;
2001
2002 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2003 if (unlikely(err)) {
2004 netdev_err(priv->net_dev,
2005 "dpni_get_link_state() failed\n");
2006 return err;
2007 }
2008
2009
2010
2011
2012
2013 tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
2014 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
2015
2016
2017
2018
2019 if (dpaa2_eth_is_type_phy(priv))
2020 goto out;
2021
2022
2023 if (priv->link_state.up == state.up)
2024 goto out;
2025
2026 if (state.up) {
2027 netif_carrier_on(priv->net_dev);
2028 netif_tx_start_all_queues(priv->net_dev);
2029 } else {
2030 netif_tx_stop_all_queues(priv->net_dev);
2031 netif_carrier_off(priv->net_dev);
2032 }
2033
2034 netdev_info(priv->net_dev, "Link Event: state %s\n",
2035 state.up ? "up" : "down");
2036
2037out:
2038 priv->link_state = state;
2039
2040 return 0;
2041}
2042
2043static int dpaa2_eth_open(struct net_device *net_dev)
2044{
2045 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2046 int err;
2047
2048 err = dpaa2_eth_seed_pool(priv, priv->bpid);
2049 if (err) {
2050
2051
2052
2053
2054 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2055 priv->dpbp_dev->obj_desc.id, priv->bpid);
2056 }
2057
2058 if (!dpaa2_eth_is_type_phy(priv)) {
2059
2060
2061
2062
2063 netif_tx_stop_all_queues(net_dev);
2064
2065
2066
2067
2068
2069
2070 netif_carrier_off(net_dev);
2071 }
2072 dpaa2_eth_enable_ch_napi(priv);
2073
2074 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2075 if (err < 0) {
2076 netdev_err(net_dev, "dpni_enable() failed\n");
2077 goto enable_err;
2078 }
2079
2080 if (dpaa2_eth_is_type_phy(priv)) {
2081 dpaa2_mac_start(priv->mac);
2082 phylink_start(priv->mac->phylink);
2083 }
2084
2085 return 0;
2086
2087enable_err:
2088 dpaa2_eth_disable_ch_napi(priv);
2089 dpaa2_eth_drain_pool(priv);
2090 return err;
2091}
2092
2093
2094static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
2095{
2096 struct dpaa2_eth_fq *fq;
2097 u32 fcnt = 0, bcnt = 0, total = 0;
2098 int i, err;
2099
2100 for (i = 0; i < priv->num_fqs; i++) {
2101 fq = &priv->fq[i];
2102 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2103 if (err) {
2104 netdev_warn(priv->net_dev, "query_fq_count failed");
2105 break;
2106 }
2107 total += fcnt;
2108 }
2109
2110 return total;
2111}
2112
2113static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
2114{
2115 int retries = 10;
2116 u32 pending;
2117
2118 do {
2119 pending = dpaa2_eth_ingress_fq_count(priv);
2120 if (pending)
2121 msleep(100);
2122 } while (pending && --retries);
2123}
2124
2125#define DPNI_TX_PENDING_VER_MAJOR 7
2126#define DPNI_TX_PENDING_VER_MINOR 13
2127static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
2128{
2129 union dpni_statistics stats;
2130 int retries = 10;
2131 int err;
2132
2133 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
2134 DPNI_TX_PENDING_VER_MINOR) < 0)
2135 goto out;
2136
2137 do {
2138 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
2139 &stats);
2140 if (err)
2141 goto out;
2142 if (stats.page_6.tx_pending_frames == 0)
2143 return;
2144 } while (--retries);
2145
2146out:
2147 msleep(500);
2148}
2149
2150static int dpaa2_eth_stop(struct net_device *net_dev)
2151{
2152 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2153 int dpni_enabled = 0;
2154 int retries = 10;
2155
2156 if (dpaa2_eth_is_type_phy(priv)) {
2157 phylink_stop(priv->mac->phylink);
2158 dpaa2_mac_stop(priv->mac);
2159 } else {
2160 netif_tx_stop_all_queues(net_dev);
2161 netif_carrier_off(net_dev);
2162 }
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174 dpaa2_eth_wait_for_egress_fq_empty(priv);
2175
2176 do {
2177 dpni_disable(priv->mc_io, 0, priv->mc_token);
2178 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2179 if (dpni_enabled)
2180
2181 msleep(100);
2182 } while (dpni_enabled && --retries);
2183 if (!retries) {
2184 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2185
2186
2187
2188 }
2189
2190 dpaa2_eth_wait_for_ingress_fq_empty(priv);
2191 dpaa2_eth_disable_ch_napi(priv);
2192
2193
2194 dpaa2_eth_drain_pool(priv);
2195
2196
2197 dpaa2_eth_sgt_cache_drain(priv);
2198
2199 return 0;
2200}
2201
2202static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2203{
2204 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2205 struct device *dev = net_dev->dev.parent;
2206 int err;
2207
2208 err = eth_mac_addr(net_dev, addr);
2209 if (err < 0) {
2210 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2211 return err;
2212 }
2213
2214 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2215 net_dev->dev_addr);
2216 if (err) {
2217 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2218 return err;
2219 }
2220
2221 return 0;
2222}
2223
2224
2225
2226
2227static void dpaa2_eth_get_stats(struct net_device *net_dev,
2228 struct rtnl_link_stats64 *stats)
2229{
2230 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2231 struct rtnl_link_stats64 *percpu_stats;
2232 u64 *cpustats;
2233 u64 *netstats = (u64 *)stats;
2234 int i, j;
2235 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2236
2237 for_each_possible_cpu(i) {
2238 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2239 cpustats = (u64 *)percpu_stats;
2240 for (j = 0; j < num; j++)
2241 netstats[j] += cpustats[j];
2242 }
2243}
2244
2245
2246
2247
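/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */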
2248static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2249 struct dpaa2_eth_priv *priv)
2250{
2251 struct netdev_hw_addr *ha;
2252 int err;
2253
2254 netdev_for_each_uc_addr(ha, net_dev) {
2255 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2256 ha->addr);
2257 if (err)
2258 netdev_warn(priv->net_dev,
2259 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2260 ha->addr, err);
2261 }
2262}
2263
2264
2265
2266
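/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */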
2267static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2268 struct dpaa2_eth_priv *priv)
2269{
2270 struct netdev_hw_addr *ha;
2271 int err;
2272
2273 netdev_for_each_mc_addr(ha, net_dev) {
2274 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2275 ha->addr);
2276 if (err)
2277 netdev_warn(priv->net_dev,
2278 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2279 ha->addr, err);
2280 }
2281}
2282
2283static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2284 __be16 vlan_proto, u16 vid)
2285{
2286 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2287 int err;
2288
2289 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2290 vid, 0, 0, 0);
2291
2292 if (err) {
2293 netdev_warn(priv->net_dev,
2294 "Could not add the vlan id %u\n",
2295 vid);
2296 return err;
2297 }
2298
2299 return 0;
2300}
2301
2302static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2303 __be16 vlan_proto, u16 vid)
2304{
2305 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2306 int err;
2307
2308 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2309
2310 if (err) {
2311 netdev_warn(priv->net_dev,
2312 "Could not remove the vlan id %u\n",
2313 vid);
2314 return err;
2315 }
2316
2317 return 0;
2318}
2319
2320static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2321{
2322 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2323 int uc_count = netdev_uc_count(net_dev);
2324 int mc_count = netdev_mc_count(net_dev);
2325 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2326 u32 options = priv->dpni_attrs.options;
2327 u16 mc_token = priv->mc_token;
2328 struct fsl_mc_io *mc_io = priv->mc_io;
2329 int err;
2330
 /* Basic sanity checks; these probably indicate a misconfiguration */
2332 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2333 netdev_info(net_dev,
2334 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2335 max_mac);
2336
 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2338 if (uc_count > max_mac) {
2339 netdev_info(net_dev,
2340 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2341 uc_count, max_mac);
2342 goto force_promisc;
2343 }
2344 if (mc_count + uc_count > max_mac) {
2345 netdev_info(net_dev,
2346 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2347 uc_count + mc_count, max_mac);
2348 goto force_mc_promisc;
2349 }
2350
 /* Adjust promisc settings due to flag combinations */
2352 if (net_dev->flags & IFF_PROMISC)
2353 goto force_promisc;
2354 if (net_dev->flags & IFF_ALLMULTI) {
 /* First, rebuild unicast filtering table. This should be done
  * in promisc mode, in order to avoid frame loss while we
  * progressively add entries to the table.
  * We don't know whether we had been in promisc already, and
  * making an MC call to find out is expensive; so set uc promisc
  * nonetheless.
  */
2362 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2363 if (err)
2364 netdev_warn(net_dev, "Can't set uc promisc\n");
2365
2366
2367 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2368 if (err)
2369 netdev_warn(net_dev, "Can't clear uc filters\n");
2370 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2371
2372
2373 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2374 if (err)
2375 netdev_warn(net_dev, "Can't clear uc promisc\n");
2376 goto force_mc_promisc;
2377 }
2378
 /* Neither unicast, nor multicast promisc will be on... eventually.
  * For now, rebuild the mac filtering tables while forcing both of them on.
  */
2382 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2383 if (err)
2384 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2385 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2386 if (err)
2387 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2388
2389
2390 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2391 if (err)
2392 netdev_warn(net_dev, "Can't clear mac filters\n");
2393 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2394 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2395
 /* Now we can clear both ucast and mcast promisc, without risking
  * to drop legitimate frames anymore.
  */
2399 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2400 if (err)
2401 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2402 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2403 if (err)
2404 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2405
2406 return;
2407
2408force_promisc:
2409 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2410 if (err)
2411 netdev_warn(net_dev, "Can't set ucast promisc\n");
2412force_mc_promisc:
2413 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2414 if (err)
2415 netdev_warn(net_dev, "Can't set mcast promisc\n");
2416}
2417
2418static int dpaa2_eth_set_features(struct net_device *net_dev,
2419 netdev_features_t features)
2420{
2421 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2422 netdev_features_t changed = features ^ net_dev->features;
2423 bool enable;
2424 int err;
2425
2426 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2427 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2428 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2429 if (err)
2430 return err;
2431 }
2432
2433 if (changed & NETIF_F_RXCSUM) {
2434 enable = !!(features & NETIF_F_RXCSUM);
2435 err = dpaa2_eth_set_rx_csum(priv, enable);
2436 if (err)
2437 return err;
2438 }
2439
2440 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2441 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2442 err = dpaa2_eth_set_tx_csum(priv, enable);
2443 if (err)
2444 return err;
2445 }
2446
2447 return 0;
2448}
2449
2450static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2451{
2452 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2453 struct hwtstamp_config config;
2454
2455 if (!dpaa2_ptp)
2456 return -EINVAL;
2457
2458 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2459 return -EFAULT;
2460
2461 switch (config.tx_type) {
2462 case HWTSTAMP_TX_OFF:
2463 case HWTSTAMP_TX_ON:
2464 case HWTSTAMP_TX_ONESTEP_SYNC:
2465 priv->tx_tstamp_type = config.tx_type;
2466 break;
2467 default:
2468 return -ERANGE;
2469 }
2470
2471 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2472 priv->rx_tstamp = false;
2473 } else {
2474 priv->rx_tstamp = true;
 /* TS is set for all frame types, not only those requested */
2476 config.rx_filter = HWTSTAMP_FILTER_ALL;
2477 }
2478
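 /* Choose how the one-step PTP correction registers will be updated */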
2479 if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2480 dpaa2_ptp_onestep_reg_update_method(priv);
2481
2482 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2483 -EFAULT : 0;
2484}
2485
2486static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2487{
2488 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2489
2490 if (cmd == SIOCSHWTSTAMP)
2491 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2492
2493 if (dpaa2_eth_is_type_phy(priv))
2494 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2495
2496 return -EOPNOTSUPP;
2497}
2498
2499static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2500{
2501 int mfl, linear_mfl;
2502
2503 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2504 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2505 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2506
2507 if (mfl > linear_mfl) {
2508 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2509 linear_mfl - VLAN_ETH_HLEN);
2510 return false;
2511 }
2512
2513 return true;
2514}
2515
2516static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2517{
2518 int mfl, err;
2519
 /* We enforce a maximum Rx frame length based on MTU only if we have
  * an XDP program attached (in order to avoid Rx S/G frames).
  * Otherwise, we accept all incoming frames as long as they are not
  * larger than the maximum size we've configured the hardware to handle.
  */
2525 if (has_xdp)
2526 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2527 else
2528 mfl = DPAA2_ETH_MFL;
2529
2530 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2531 if (err) {
2532 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2533 return err;
2534 }
2535
2536 return 0;
2537}
2538
2539static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2540{
2541 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2542 int err;
2543
2544 if (!priv->xdp_prog)
2545 goto out;
2546
2547 if (!xdp_mtu_valid(priv, new_mtu))
2548 return -EINVAL;
2549
2550 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2551 if (err)
2552 return err;
2553
2554out:
2555 dev->mtu = new_mtu;
2556 return 0;
2557}
2558
2559static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2560{
2561 struct dpni_buffer_layout buf_layout = {0};
2562 int err;
2563
2564 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2565 DPNI_QUEUE_RX, &buf_layout);
2566 if (err) {
2567 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2568 return err;
2569 }
2570
 /* Reserve extra headroom for XDP header size changes */
2572 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2573 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2574 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2575 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2576 DPNI_QUEUE_RX, &buf_layout);
2577 if (err) {
2578 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2579 return err;
2580 }
2581
2582 return 0;
2583}
2584
2585static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2586{
2587 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2588 struct dpaa2_eth_channel *ch;
2589 struct bpf_prog *old;
2590 bool up, need_update;
2591 int i, err;
2592
2593 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2594 return -EINVAL;
2595
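 /* Take one reference on the program for each channel that will use it */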
2596 if (prog)
2597 bpf_prog_add(prog, priv->num_channels);
2598
2599 up = netif_running(dev);
2600 need_update = (!!priv->xdp_prog != !!prog);
2601
2602 if (up)
2603 dpaa2_eth_stop(dev);
2604
 /* While in XDP mode, enforce a maximum Rx frame size based on MTU.
  * Also, when switching between XDP and non-XDP modes we need to
  * reconfigure our Rx buffer layout.
  */
2610 if (need_update) {
2611 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2612 if (err)
2613 goto out_err;
2614 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2615 if (err)
2616 goto out_err;
2617 }
2618
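 /* Swap in the new program on the interface and on each channel,
  * releasing the references held on the old one
  */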
2619 old = xchg(&priv->xdp_prog, prog);
2620 if (old)
2621 bpf_prog_put(old);
2622
2623 for (i = 0; i < priv->num_channels; i++) {
2624 ch = priv->channel[i];
2625 old = xchg(&ch->xdp.prog, prog);
2626 if (old)
2627 bpf_prog_put(old);
2628 }
2629
2630 if (up) {
2631 err = dpaa2_eth_open(dev);
2632 if (err)
2633 return err;
2634 }
2635
2636 return 0;
2637
2638out_err:
2639 if (prog)
2640 bpf_prog_sub(prog, priv->num_channels);
2641 if (up)
2642 dpaa2_eth_open(dev);
2643
2644 return err;
2645}
2646
2647static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2648{
2649 switch (xdp->command) {
2650 case XDP_SETUP_PROG:
2651 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2652 default:
2653 return -EINVAL;
2654 }
2655
2656 return 0;
2657}
2658
2659static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2660 struct xdp_frame *xdpf,
2661 struct dpaa2_fd *fd)
2662{
2663 struct device *dev = net_dev->dev.parent;
2664 unsigned int needed_headroom;
2665 struct dpaa2_eth_swa *swa;
2666 void *buffer_start, *aligned_start;
2667 dma_addr_t addr;
2668
 /* We require a minimum headroom to be able to transmit the frame.
  * Otherwise return an error and let the original net_device handle it.
  */
2672 needed_headroom = dpaa2_eth_needed_headroom(NULL);
2673 if (xdpf->headroom < needed_headroom)
2674 return -EINVAL;
2675
2676
2677 memset(fd, 0, sizeof(*fd));
2678
2679
2680 buffer_start = xdpf->data - needed_headroom;
2681 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2682 DPAA2_ETH_TX_BUF_ALIGN);
2683 if (aligned_start >= xdpf->data - xdpf->headroom)
2684 buffer_start = aligned_start;
2685
2686 swa = (struct dpaa2_eth_swa *)buffer_start;
2687
2688 swa->type = DPAA2_ETH_SWA_XDP;
2689 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2690 swa->xdp.xdpf = xdpf;
2691
2692 addr = dma_map_single(dev, buffer_start,
2693 swa->xdp.dma_size,
2694 DMA_BIDIRECTIONAL);
2695 if (unlikely(dma_mapping_error(dev, addr)))
2696 return -ENOMEM;
2697
2698 dpaa2_fd_set_addr(fd, addr);
2699 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2700 dpaa2_fd_set_len(fd, xdpf->len);
2701 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2702 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2703
2704 return 0;
2705}
2706
2707static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2708 struct xdp_frame **frames, u32 flags)
2709{
2710 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2711 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2712 struct rtnl_link_stats64 *percpu_stats;
2713 struct dpaa2_eth_fq *fq;
2714 struct dpaa2_fd *fds;
2715 int enqueued, i, err;
2716
2717 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2718 return -EINVAL;
2719
2720 if (!netif_running(net_dev))
2721 return -ENETDOWN;
2722
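 /* Use the Tx frame queue (and its FD scratch area) affine to the current CPU */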
2723 fq = &priv->fq[smp_processor_id()];
2724 xdp_redirect_fds = &fq->xdp_redirect_fds;
2725 fds = xdp_redirect_fds->fds;
2726
2727 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2728
 /* Create a FD for each xdp_frame in the list received */
2730 for (i = 0; i < n; i++) {
2731 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2732 if (err)
2733 break;
2734 }
2735 xdp_redirect_fds->num = i;
2736
2737
2738 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2739
2740
2741 percpu_stats->tx_packets += enqueued;
2742 for (i = 0; i < enqueued; i++)
2743 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2744
2745 return enqueued;
2746}
2747
2748static int update_xps(struct dpaa2_eth_priv *priv)
2749{
2750 struct net_device *net_dev = priv->net_dev;
2751 struct cpumask xps_mask;
2752 struct dpaa2_eth_fq *fq;
2753 int i, num_queues, netdev_queues;
2754 int err = 0;
2755
2756 num_queues = dpaa2_eth_queue_count(priv);
2757 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2758
 /* The first <num_queues> entries of priv->fq array are Tx/Tx conf
  * queues, so only process those
  */
2762 for (i = 0; i < netdev_queues; i++) {
2763 fq = &priv->fq[i % num_queues];
2764
2765 cpumask_clear(&xps_mask);
2766 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2767
2768 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2769 if (err) {
2770 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2771 break;
2772 }
2773 }
2774
2775 return err;
2776}
2777
2778static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2779 struct tc_mqprio_qopt *mqprio)
2780{
2781 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2782 u8 num_tc, num_queues;
2783 int i;
2784
2785 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2786 num_queues = dpaa2_eth_queue_count(priv);
2787 num_tc = mqprio->num_tc;
2788
2789 if (num_tc == net_dev->num_tc)
2790 return 0;
2791
2792 if (num_tc > dpaa2_eth_tc_count(priv)) {
2793 netdev_err(net_dev, "Max %d traffic classes supported\n",
2794 dpaa2_eth_tc_count(priv));
2795 return -EOPNOTSUPP;
2796 }
2797
2798 if (!num_tc) {
2799 netdev_reset_tc(net_dev);
2800 netif_set_real_num_tx_queues(net_dev, num_queues);
2801 goto out;
2802 }
2803
2804 netdev_set_num_tc(net_dev, num_tc);
2805 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2806
2807 for (i = 0; i < num_tc; i++)
2808 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2809
2810out:
2811 update_xps(priv);
2812
2813 return 0;
2814}
2815
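/* The qdisc rate is expressed in bytes/s; the DPNI shaper expects Mbits/s */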
2816#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2817
2818static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2819{
2820 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2821 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2822 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2823 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2824 int err;
2825
2826 if (p->command == TC_TBF_STATS)
2827 return -EOPNOTSUPP;
2828
 /* Only per port Tx shaping */
2830 if (p->parent != TC_H_ROOT)
2831 return -EOPNOTSUPP;
2832
2833 if (p->command == TC_TBF_REPLACE) {
2834 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2835 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2836 DPAA2_ETH_MAX_BURST_SIZE);
2837 return -EINVAL;
2838 }
2839
2840 tx_cr_shaper.max_burst_size = cfg->max_size;
 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
  * rate in Mbits/s
  */
2844 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2845 }
2846
2847 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2848 &tx_er_shaper, 0);
2849 if (err) {
2850 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2851 return err;
2852 }
2853
2854 return 0;
2855}
2856
2857static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2858 enum tc_setup_type type, void *type_data)
2859{
2860 switch (type) {
2861 case TC_SETUP_QDISC_MQPRIO:
2862 return dpaa2_eth_setup_mqprio(net_dev, type_data);
2863 case TC_SETUP_QDISC_TBF:
2864 return dpaa2_eth_setup_tbf(net_dev, type_data);
2865 default:
2866 return -EOPNOTSUPP;
2867 }
2868}
2869
2870static const struct net_device_ops dpaa2_eth_ops = {
2871 .ndo_open = dpaa2_eth_open,
2872 .ndo_start_xmit = dpaa2_eth_tx,
2873 .ndo_stop = dpaa2_eth_stop,
2874 .ndo_set_mac_address = dpaa2_eth_set_addr,
2875 .ndo_get_stats64 = dpaa2_eth_get_stats,
2876 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2877 .ndo_set_features = dpaa2_eth_set_features,
2878 .ndo_eth_ioctl = dpaa2_eth_ioctl,
2879 .ndo_change_mtu = dpaa2_eth_change_mtu,
2880 .ndo_bpf = dpaa2_eth_xdp,
2881 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2882 .ndo_setup_tc = dpaa2_eth_setup_tc,
2883 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
2884 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
2885};
2886
2887static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2888{
2889 struct dpaa2_eth_channel *ch;
2890
2891 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2892
2893
2894 ch->stats.cdan++;
2895
2896 napi_schedule(&ch->napi);
2897}
2898
2899
2900static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
2901{
2902 struct fsl_mc_device *dpcon;
2903 struct device *dev = priv->net_dev->dev.parent;
2904 int err;
2905
2906 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2907 FSL_MC_POOL_DPCON, &dpcon);
2908 if (err) {
2909 if (err == -ENXIO)
2910 err = -EPROBE_DEFER;
2911 else
2912 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2913 return ERR_PTR(err);
2914 }
2915
2916 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2917 if (err) {
2918 dev_err(dev, "dpcon_open() failed\n");
2919 goto free;
2920 }
2921
2922 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2923 if (err) {
2924 dev_err(dev, "dpcon_reset() failed\n");
2925 goto close;
2926 }
2927
2928 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2929 if (err) {
2930 dev_err(dev, "dpcon_enable() failed\n");
2931 goto close;
2932 }
2933
2934 return dpcon;
2935
2936close:
2937 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2938free:
2939 fsl_mc_object_free(dpcon);
2940
2941 return ERR_PTR(err);
2942}
2943
2944static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2945 struct fsl_mc_device *dpcon)
2946{
2947 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2948 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2949 fsl_mc_object_free(dpcon);
2950}
2951
2952static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
2953{
2954 struct dpaa2_eth_channel *channel;
2955 struct dpcon_attr attr;
2956 struct device *dev = priv->net_dev->dev.parent;
2957 int err;
2958
2959 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2960 if (!channel)
2961 return NULL;
2962
2963 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
2964 if (IS_ERR(channel->dpcon)) {
2965 err = PTR_ERR(channel->dpcon);
2966 goto err_setup;
2967 }
2968
2969 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2970 &attr);
2971 if (err) {
2972 dev_err(dev, "dpcon_get_attributes() failed\n");
2973 goto err_get_attr;
2974 }
2975
2976 channel->dpcon_id = attr.id;
2977 channel->ch_id = attr.qbman_ch_id;
2978 channel->priv = priv;
2979
2980 return channel;
2981
2982err_get_attr:
2983 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2984err_setup:
2985 kfree(channel);
2986 return ERR_PTR(err);
2987}
2988
2989static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2990 struct dpaa2_eth_channel *channel)
2991{
2992 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2993 kfree(channel);
2994}
2995
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
2999static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3000{
3001 struct dpaa2_io_notification_ctx *nctx;
3002 struct dpaa2_eth_channel *channel;
3003 struct dpcon_notification_cfg dpcon_notif_cfg;
3004 struct device *dev = priv->net_dev->dev.parent;
3005 int i, err;
3006
 /* We want the ability to spread ingress traffic (RX, TX conf) to as
  * many cores as possible, so we need one channel for each core
  * (unless there's only one core, in which case we need one channel).
  * Allocate one DPCON-backed channel per online CPU, register a data
  * availability notification for it and stop once there are enough
  * channels to service all Rx and Tx confirmation queues.
  */
3016 cpumask_clear(&priv->dpio_cpumask);
3017 for_each_online_cpu(i) {
3018
3019 channel = dpaa2_eth_alloc_channel(priv);
3020 if (IS_ERR_OR_NULL(channel)) {
3021 err = PTR_ERR_OR_ZERO(channel);
3022 if (err != -EPROBE_DEFER)
3023 dev_info(dev,
3024 "No affine channel for cpu %d and above\n", i);
3025 goto err_alloc_ch;
3026 }
3027
3028 priv->channel[priv->num_channels] = channel;
3029
3030 nctx = &channel->nctx;
3031 nctx->is_cdan = 1;
3032 nctx->cb = dpaa2_eth_cdan_cb;
3033 nctx->id = channel->ch_id;
3034 nctx->desired_cpu = i;
3035
3036
3037 channel->dpio = dpaa2_io_service_select(i);
3038 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3039 if (err) {
3040 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
 /* If there is no affine DPIO for this core, there's probably
  * none available for the next cores either. Signal we want
  * to retry later, in case the DPIO devices weren't probed yet.
  */
3046 err = -EPROBE_DEFER;
3047 goto err_service_reg;
3048 }
3049
3050
3051 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3052 dpcon_notif_cfg.priority = 0;
3053 dpcon_notif_cfg.user_ctx = nctx->qman64;
3054 err = dpcon_set_notification(priv->mc_io, 0,
3055 channel->dpcon->mc_handle,
3056 &dpcon_notif_cfg);
3057 if (err) {
 dev_err(dev, "dpcon_set_notification() failed\n");
3059 goto err_set_cdan;
3060 }
3061
 /* If we managed to allocate a channel and also found an affine
  * DPIO for this core, add it to the final mask
  */
3065 cpumask_set_cpu(i, &priv->dpio_cpumask);
3066 priv->num_channels++;
3067
 /* Stop if we already have enough channels to accommodate all
  * RX and TX conf queues
  */
3071 if (priv->num_channels == priv->dpni_attrs.num_queues)
3072 break;
3073 }
3074
3075 return 0;
3076
3077err_set_cdan:
3078 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3079err_service_reg:
3080 dpaa2_eth_free_channel(priv, channel);
3081err_alloc_ch:
3082 if (err == -EPROBE_DEFER) {
3083 for (i = 0; i < priv->num_channels; i++) {
3084 channel = priv->channel[i];
3085 nctx = &channel->nctx;
3086 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3087 dpaa2_eth_free_channel(priv, channel);
3088 }
3089 priv->num_channels = 0;
3090 return err;
3091 }
3092
3093 if (cpumask_empty(&priv->dpio_cpumask)) {
3094 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3095 return -ENODEV;
3096 }
3097
3098 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3099 cpumask_pr_args(&priv->dpio_cpumask));
3100
3101 return 0;
3102}
3103
3104static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3105{
3106 struct device *dev = priv->net_dev->dev.parent;
3107 struct dpaa2_eth_channel *ch;
3108 int i;
3109
 /* Deregister CDAN notifications and free the channels */
3111 for (i = 0; i < priv->num_channels; i++) {
3112 ch = priv->channel[i];
3113 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3114 dpaa2_eth_free_channel(priv, ch);
3115 }
3116}
3117
3118static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3119 int cpu)
3120{
3121 struct device *dev = priv->net_dev->dev.parent;
3122 int i;
3123
3124 for (i = 0; i < priv->num_channels; i++)
3125 if (priv->channel[i]->nctx.desired_cpu == cpu)
3126 return priv->channel[i];
3127
 /* We should never get here. Warn and fall back to the first
  * channel so the caller still gets a usable pointer.
  */
3131 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3132
3133 return priv->channel[0];
3134}
3135
3136static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3137{
3138 struct device *dev = priv->net_dev->dev.parent;
3139 struct dpaa2_eth_fq *fq;
3140 int rx_cpu, txc_cpu;
3141 int i;
3142
 /* For each FQ, pick one channel/CPU to deliver frames to.
  * This may well change at runtime, either through irqbalance or
  * through direct user intervention.
  */
3147 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3148
3149 for (i = 0; i < priv->num_fqs; i++) {
3150 fq = &priv->fq[i];
3151 switch (fq->type) {
3152 case DPAA2_RX_FQ:
3153 case DPAA2_RX_ERR_FQ:
3154 fq->target_cpu = rx_cpu;
3155 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3156 if (rx_cpu >= nr_cpu_ids)
3157 rx_cpu = cpumask_first(&priv->dpio_cpumask);
3158 break;
3159 case DPAA2_TX_CONF_FQ:
3160 fq->target_cpu = txc_cpu;
3161 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3162 if (txc_cpu >= nr_cpu_ids)
3163 txc_cpu = cpumask_first(&priv->dpio_cpumask);
3164 break;
3165 default:
3166 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3167 }
3168 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
3169 }
3170
3171 update_xps(priv);
3172}
3173
3174static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3175{
3176 int i, j;
3177
 /* We have one TxConf FQ per Tx flow.
  * The number of Tx and Rx queues is the same.
  * Tx queues come first in the fq array.
  */
3182 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3183 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3184 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3185 priv->fq[priv->num_fqs++].flowid = (u16)i;
3186 }
3187
3188 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3189 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3190 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3191 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3192 priv->fq[priv->num_fqs].tc = (u8)j;
3193 priv->fq[priv->num_fqs++].flowid = (u16)i;
3194 }
3195 }
3196
3197
3198 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3199 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3200
3201
3202 dpaa2_eth_set_fq_affinity(priv);
3203}
3204
/* Allocate and configure one buffer pool for each interface */
3206static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
3207{
3208 int err;
3209 struct fsl_mc_device *dpbp_dev;
3210 struct device *dev = priv->net_dev->dev.parent;
3211 struct dpbp_attr dpbp_attrs;
3212
3213 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3214 &dpbp_dev);
3215 if (err) {
3216 if (err == -ENXIO)
3217 err = -EPROBE_DEFER;
3218 else
3219 dev_err(dev, "DPBP device allocation failed\n");
3220 return err;
3221 }
3222
3223 priv->dpbp_dev = dpbp_dev;
3224
3225 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3226 &dpbp_dev->mc_handle);
3227 if (err) {
3228 dev_err(dev, "dpbp_open() failed\n");
3229 goto err_open;
3230 }
3231
3232 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3233 if (err) {
3234 dev_err(dev, "dpbp_reset() failed\n");
3235 goto err_reset;
3236 }
3237
3238 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3239 if (err) {
3240 dev_err(dev, "dpbp_enable() failed\n");
3241 goto err_enable;
3242 }
3243
3244 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3245 &dpbp_attrs);
3246 if (err) {
3247 dev_err(dev, "dpbp_get_attributes() failed\n");
3248 goto err_get_attr;
3249 }
3250 priv->bpid = dpbp_attrs.bpid;
3251
3252 return 0;
3253
3254err_get_attr:
3255 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3256err_enable:
3257err_reset:
3258 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3259err_open:
3260 fsl_mc_object_free(dpbp_dev);
3261
3262 return err;
3263}
3264
3265static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
3266{
3267 dpaa2_eth_drain_pool(priv);
3268 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3269 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3270 fsl_mc_object_free(priv->dpbp_dev);
3271}
3272
3273static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3274{
3275 struct device *dev = priv->net_dev->dev.parent;
3276 struct dpni_buffer_layout buf_layout = {0};
3277 u16 rx_buf_align;
3278 int err;
3279
 /* We need to check for WRIOP version 1.0.0, but depending on the MC
  * version, this number is not always provided correctly on rev1.
  * We need to check for both alternatives in this situation.
  */
3284 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3285 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3286 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3287 else
3288 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3289
 /* We need to ensure that the buffer size seen by WRIOP is a multiple
  * of 64 or 256 bytes depending on the WRIOP version.
  */
3293 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3294
3295
3296 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3297 buf_layout.pass_timestamp = true;
3298 buf_layout.pass_frame_status = true;
3299 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3300 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3301 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3302 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3303 DPNI_QUEUE_TX, &buf_layout);
3304 if (err) {
3305 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3306 return err;
3307 }
3308
3309
3310 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3311 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3312 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3313 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3314 if (err) {
3315 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3316 return err;
3317 }
3318
 /* Now that we've set our Tx buffer layout, retrieve the minimum
  * required Tx data offset.
  */
3322 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3323 &priv->tx_data_offset);
3324 if (err) {
3325 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3326 return err;
3327 }
3328
3329 if ((priv->tx_data_offset % 64) != 0)
3330 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3331 priv->tx_data_offset);
3332
3333
3334 buf_layout.pass_frame_status = true;
3335 buf_layout.pass_parser_result = true;
3336 buf_layout.data_align = rx_buf_align;
3337 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3338 buf_layout.private_data_size = 0;
3339 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3340 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3341 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3342 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3343 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3344 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3345 DPNI_QUEUE_RX, &buf_layout);
3346 if (err) {
3347 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3348 return err;
3349 }
3350
3351 return 0;
3352}
3353
3354#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3355#define DPNI_ENQUEUE_FQID_VER_MINOR 9
3356
3357static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3358 struct dpaa2_eth_fq *fq,
3359 struct dpaa2_fd *fd, u8 prio,
3360 u32 num_frames __always_unused,
3361 int *frames_enqueued)
3362{
3363 int err;
3364
3365 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3366 priv->tx_qdid, prio,
3367 fq->tx_qdbin, fd);
3368 if (!err && frames_enqueued)
3369 *frames_enqueued = 1;
3370 return err;
3371}
3372
3373static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3374 struct dpaa2_eth_fq *fq,
3375 struct dpaa2_fd *fd,
3376 u8 prio, u32 num_frames,
3377 int *frames_enqueued)
3378{
3379 int err;
3380
3381 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3382 fq->tx_fqid[prio],
3383 fd, num_frames);
3384
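 /* The DPIO service call returns the number of frames it managed to
  * enqueue; zero means the portal was busy and nothing was sent.
  */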
3385 if (err == 0)
3386 return -EBUSY;
3387
3388 if (frames_enqueued)
3389 *frames_enqueued = err;
3390 return 0;
3391}
3392
3393static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3394{
3395 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3396 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3397 priv->enqueue = dpaa2_eth_enqueue_qd;
3398 else
3399 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3400}
3401
3402static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3403{
3404 struct device *dev = priv->net_dev->dev.parent;
3405 struct dpni_link_cfg link_cfg = {0};
3406 int err;
3407
 /* Get the default link options so we don't override other flags */
3409 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3410 if (err) {
3411 dev_err(dev, "dpni_get_link_cfg() failed\n");
3412 return err;
3413 }
3414
 /* By default, enable both Rx and Tx pause frames */
3416 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3417 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3418 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3419 if (err) {
3420 dev_err(dev, "dpni_set_link_cfg() failed\n");
3421 return err;
3422 }
3423
3424 priv->link_state.options = link_cfg.options;
3425
3426 return 0;
3427}
3428
3429static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3430{
3431 struct dpni_queue_id qid = {0};
3432 struct dpaa2_eth_fq *fq;
3433 struct dpni_queue queue;
3434 int i, j, err;
3435
 /* We only use Tx FQIDs for FQID-based enqueue, so check
  * if the DPNI version supports it before updating FQIDs
  */
3439 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3440 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3441 return;
3442
3443 for (i = 0; i < priv->num_fqs; i++) {
3444 fq = &priv->fq[i];
3445 if (fq->type != DPAA2_TX_CONF_FQ)
3446 continue;
3447 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3448 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3449 DPNI_QUEUE_TX, j, fq->flowid,
3450 &queue, &qid);
3451 if (err)
3452 goto out_err;
3453
3454 fq->tx_fqid[j] = qid.fqid;
3455 if (fq->tx_fqid[j] == 0)
3456 goto out_err;
3457 }
3458 }
3459
3460 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3461
3462 return;
3463
3464out_err:
3465 netdev_info(priv->net_dev,
3466 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3467 priv->enqueue = dpaa2_eth_enqueue_qd;
3468}
3469
/* Configure ingress classification based on VLAN PCP */
3471static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3472{
3473 struct device *dev = priv->net_dev->dev.parent;
3474 struct dpkg_profile_cfg kg_cfg = {0};
3475 struct dpni_qos_tbl_cfg qos_cfg = {0};
3476 struct dpni_rule_cfg key_params;
3477 void *dma_mem, *key, *mask;
3478 u8 key_size = 2;
3479 int i, pcp, err;
3480
 /* VLAN-based classification only makes sense if we have multiple
  * traffic classes.
  * Also, we need to extract just the 3-bit PCP field from the VLAN
  * header and we can only do that by using a mask.
  */
3486 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3487 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3488 return -EOPNOTSUPP;
3489 }
3490
3491 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3492 if (!dma_mem)
3493 return -ENOMEM;
3494
3495 kg_cfg.num_extracts = 1;
3496 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3497 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3498 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3499 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3500
3501 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3502 if (err) {
3503 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3504 goto out_free_tbl;
3505 }
3506
3507
3508 qos_cfg.default_tc = 0;
3509 qos_cfg.discard_on_miss = 0;
3510 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3511 DPAA2_CLASSIFIER_DMA_SIZE,
3512 DMA_TO_DEVICE);
3513 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3514 dev_err(dev, "QoS table DMA mapping failed\n");
3515 err = -ENOMEM;
3516 goto out_free_tbl;
3517 }
3518
3519 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3520 if (err) {
3521 dev_err(dev, "dpni_set_qos_table failed\n");
3522 goto out_unmap_tbl;
3523 }
3524
3525
3526 key = kzalloc(key_size * 2, GFP_KERNEL);
3527 if (!key) {
3528 err = -ENOMEM;
3529 goto out_unmap_tbl;
3530 }
3531 mask = key + key_size;
3532 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3533
3534 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3535 DMA_TO_DEVICE);
3536 if (dma_mapping_error(dev, key_params.key_iova)) {
3537 dev_err(dev, "Qos table entry DMA mapping failed\n");
3538 err = -ENOMEM;
3539 goto out_free_key;
3540 }
3541
3542 key_params.mask_iova = key_params.key_iova + key_size;
3543 key_params.key_size = key_size;
3544
 /* We add rules for PCP-based distribution starting with the highest
  * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
  * classes to accommodate all priority levels, the lowest ones end up
  * on TC 0, which was configured as default.
  */
3550 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3551 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3552 dma_sync_single_for_device(dev, key_params.key_iova,
3553 key_size * 2, DMA_TO_DEVICE);
3554
3555 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3556 &key_params, i, i);
3557 if (err) {
3558 dev_err(dev, "dpni_add_qos_entry failed\n");
3559 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3560 goto out_unmap_key;
3561 }
3562 }
3563
3564 priv->vlan_cls_enabled = true;
3565
 /* Table and key memory is not persistent, clean everything up after
  * configuration is finished
  */
3569out_unmap_key:
3570 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3571out_free_key:
3572 kfree(key);
3573out_unmap_tbl:
3574 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3575 DMA_TO_DEVICE);
3576out_free_tbl:
3577 kfree(dma_mem);
3578
3579 return err;
3580}
3581
3582
3583static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3584{
3585 struct device *dev = &ls_dev->dev;
3586 struct dpaa2_eth_priv *priv;
3587 struct net_device *net_dev;
3588 int err;
3589
3590 net_dev = dev_get_drvdata(dev);
3591 priv = netdev_priv(net_dev);
3592
3593
3594 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3595 if (err) {
3596 dev_err(dev, "dpni_open() failed\n");
3597 return err;
3598 }
3599
3600
3601 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3602 &priv->dpni_ver_minor);
3603 if (err) {
3604 dev_err(dev, "dpni_get_api_version() failed\n");
3605 goto close;
3606 }
3607 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3608 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3609 priv->dpni_ver_major, priv->dpni_ver_minor,
3610 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3611 err = -ENOTSUPP;
3612 goto close;
3613 }
3614
3615 ls_dev->mc_io = priv->mc_io;
3616 ls_dev->mc_handle = priv->mc_token;
3617
3618 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3619 if (err) {
3620 dev_err(dev, "dpni_reset() failed\n");
3621 goto close;
3622 }
3623
3624 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3625 &priv->dpni_attrs);
3626 if (err) {
3627 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3628 goto close;
3629 }
3630
3631 err = dpaa2_eth_set_buffer_layout(priv);
3632 if (err)
3633 goto close;
3634
3635 dpaa2_eth_set_enqueue_mode(priv);
3636
3637
3638 if (dpaa2_eth_has_pause_support(priv)) {
3639 err = dpaa2_eth_set_pause(priv);
3640 if (err)
3641 goto close;
3642 }
3643
3644 err = dpaa2_eth_set_vlan_qos(priv);
3645 if (err && err != -EOPNOTSUPP)
3646 goto close;
3647
3648 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3649 sizeof(struct dpaa2_eth_cls_rule),
3650 GFP_KERNEL);
3651 if (!priv->cls_rules) {
3652 err = -ENOMEM;
3653 goto close;
3654 }
3655
3656 return 0;
3657
3658close:
3659 dpni_close(priv->mc_io, 0, priv->mc_token);
3660
3661 return err;
3662}
3663
3664static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3665{
3666 int err;
3667
3668 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3669 if (err)
3670 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3671 err);
3672
3673 dpni_close(priv->mc_io, 0, priv->mc_token);
3674}
3675
3676static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3677 struct dpaa2_eth_fq *fq)
3678{
3679 struct device *dev = priv->net_dev->dev.parent;
3680 struct dpni_queue queue;
3681 struct dpni_queue_id qid;
3682 int err;
3683
3684 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3685 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3686 if (err) {
3687 dev_err(dev, "dpni_get_queue(RX) failed\n");
3688 return err;
3689 }
3690
3691 fq->fqid = qid.fqid;
3692
3693 queue.destination.id = fq->channel->dpcon_id;
3694 queue.destination.type = DPNI_DEST_DPCON;
3695 queue.destination.priority = 1;
3696 queue.user_context = (u64)(uintptr_t)fq;
3697 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3698 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3699 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3700 &queue);
3701 if (err) {
3702 dev_err(dev, "dpni_set_queue(RX) failed\n");
3703 return err;
3704 }
3705
 /* xdp_rxq setup */
 /* only once for each channel */
3708 if (fq->tc > 0)
3709 return 0;
3710
3711 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3712 fq->flowid, 0);
3713 if (err) {
3714 dev_err(dev, "xdp_rxq_info_reg failed\n");
3715 return err;
3716 }
3717
3718 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3719 MEM_TYPE_PAGE_ORDER0, NULL);
3720 if (err) {
3721 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3722 return err;
3723 }
3724
3725 return 0;
3726}
3727
3728static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3729 struct dpaa2_eth_fq *fq)
3730{
3731 struct device *dev = priv->net_dev->dev.parent;
3732 struct dpni_queue queue;
3733 struct dpni_queue_id qid;
3734 int i, err;
3735
3736 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3737 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3738 DPNI_QUEUE_TX, i, fq->flowid,
3739 &queue, &qid);
3740 if (err) {
3741 dev_err(dev, "dpni_get_queue(TX) failed\n");
3742 return err;
3743 }
3744 fq->tx_fqid[i] = qid.fqid;
3745 }
3746
3747
3748 fq->tx_qdbin = qid.qdbin;
3749
3750 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3751 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3752 &queue, &qid);
3753 if (err) {
3754 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3755 return err;
3756 }
3757
3758 fq->fqid = qid.fqid;
3759
3760 queue.destination.id = fq->channel->dpcon_id;
3761 queue.destination.type = DPNI_DEST_DPCON;
3762 queue.destination.priority = 0;
3763 queue.user_context = (u64)(uintptr_t)fq;
3764 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3765 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3766 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3767 &queue);
3768 if (err) {
3769 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3770 return err;
3771 }
3772
3773 return 0;
3774}
3775
3776static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3777 struct dpaa2_eth_fq *fq)
3778{
3779 struct device *dev = priv->net_dev->dev.parent;
3780 struct dpni_queue q = { { 0 } };
3781 struct dpni_queue_id qid;
3782 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3783 int err;
3784
3785 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3786 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3787 if (err) {
3788 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3789 return err;
3790 }
3791
3792 fq->fqid = qid.fqid;
3793
3794 q.destination.id = fq->channel->dpcon_id;
3795 q.destination.type = DPNI_DEST_DPCON;
3796 q.destination.priority = 1;
3797 q.user_context = (u64)(uintptr_t)fq;
3798 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3799 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3800 if (err) {
3801 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3802 return err;
3803 }
3804
3805 return 0;
3806}
3807
/* Supported header fields for Rx hash distribution key */
3809static const struct dpaa2_eth_dist_fields dist_fields[] = {
3810 {
3811
3812 .rxnfc_field = RXH_L2DA,
3813 .cls_prot = NET_PROT_ETH,
3814 .cls_field = NH_FLD_ETH_DA,
3815 .id = DPAA2_ETH_DIST_ETHDST,
3816 .size = 6,
3817 }, {
3818 .cls_prot = NET_PROT_ETH,
3819 .cls_field = NH_FLD_ETH_SA,
3820 .id = DPAA2_ETH_DIST_ETHSRC,
3821 .size = 6,
3822 }, {
 /* This is the last ethertype field parsed:
  * depending on frame format, it can be the MAC ethertype
  * or the VLAN etype.
  */
3827 .cls_prot = NET_PROT_ETH,
3828 .cls_field = NH_FLD_ETH_TYPE,
3829 .id = DPAA2_ETH_DIST_ETHTYPE,
3830 .size = 2,
3831 }, {
3832
3833 .rxnfc_field = RXH_VLAN,
3834 .cls_prot = NET_PROT_VLAN,
3835 .cls_field = NH_FLD_VLAN_TCI,
3836 .id = DPAA2_ETH_DIST_VLAN,
3837 .size = 2,
3838 }, {
3839
3840 .rxnfc_field = RXH_IP_SRC,
3841 .cls_prot = NET_PROT_IP,
3842 .cls_field = NH_FLD_IP_SRC,
3843 .id = DPAA2_ETH_DIST_IPSRC,
3844 .size = 4,
3845 }, {
3846 .rxnfc_field = RXH_IP_DST,
3847 .cls_prot = NET_PROT_IP,
3848 .cls_field = NH_FLD_IP_DST,
3849 .id = DPAA2_ETH_DIST_IPDST,
3850 .size = 4,
3851 }, {
3852 .rxnfc_field = RXH_L3_PROTO,
3853 .cls_prot = NET_PROT_IP,
3854 .cls_field = NH_FLD_IP_PROTO,
3855 .id = DPAA2_ETH_DIST_IPPROTO,
3856 .size = 1,
3857 }, {
 /* Using UDP ports, this is functionally equivalent to raw
  * byte pairs from L4 header.
  */
3861 .rxnfc_field = RXH_L4_B_0_1,
3862 .cls_prot = NET_PROT_UDP,
3863 .cls_field = NH_FLD_UDP_PORT_SRC,
3864 .id = DPAA2_ETH_DIST_L4SRC,
3865 .size = 2,
3866 }, {
3867 .rxnfc_field = RXH_L4_B_2_3,
3868 .cls_prot = NET_PROT_UDP,
3869 .cls_field = NH_FLD_UDP_PORT_DST,
3870 .id = DPAA2_ETH_DIST_L4DST,
3871 .size = 2,
3872 },
3873};
3874
3875
3876static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3877{
3878 struct device *dev = priv->net_dev->dev.parent;
3879 struct dpni_rx_tc_dist_cfg dist_cfg;
3880 int i, err = 0;
3881
3882 memset(&dist_cfg, 0, sizeof(dist_cfg));
3883
3884 dist_cfg.key_cfg_iova = key;
3885 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3886 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3887
3888 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3889 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3890 i, &dist_cfg);
3891 if (err) {
3892 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3893 break;
3894 }
3895 }
3896
3897 return err;
3898}
3899
3900
3901static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3902{
3903 struct device *dev = priv->net_dev->dev.parent;
3904 struct dpni_rx_dist_cfg dist_cfg;
3905 int i, err = 0;
3906
3907 memset(&dist_cfg, 0, sizeof(dist_cfg));
3908
3909 dist_cfg.key_cfg_iova = key;
3910 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3911 dist_cfg.enable = 1;
3912
3913 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3914 dist_cfg.tc = i;
3915 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3916 &dist_cfg);
3917 if (err) {
3918 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3919 break;
3920 }
3921
 /* If the flow steering / hashing key is shared between all
  * traffic classes, install it just once
  */
3925 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3926 break;
3927 }
3928
3929 return err;
3930}
3931
3932
3933static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3934{
3935 struct device *dev = priv->net_dev->dev.parent;
3936 struct dpni_rx_dist_cfg dist_cfg;
3937 int i, err = 0;
3938
3939 memset(&dist_cfg, 0, sizeof(dist_cfg));
3940
3941 dist_cfg.key_cfg_iova = key;
3942 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3943 dist_cfg.enable = 1;
3944
3945 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3946 dist_cfg.tc = i;
3947 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3948 &dist_cfg);
3949 if (err) {
3950 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3951 break;
3952 }
3953
 /* If the flow steering / hashing key is shared between all
  * traffic classes, install it just once
  */
3957 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3958 break;
3959 }
3960
3961 return err;
3962}
3963
3964
3965int dpaa2_eth_cls_key_size(u64 fields)
3966{
3967 int i, size = 0;
3968
3969 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3970 if (!(fields & dist_fields[i].id))
3971 continue;
3972 size += dist_fields[i].size;
3973 }
3974
3975 return size;
3976}
3977
3978
3979int dpaa2_eth_cls_fld_off(int prot, int field)
3980{
3981 int i, off = 0;
3982
3983 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3984 if (dist_fields[i].cls_prot == prot &&
3985 dist_fields[i].cls_field == field)
3986 return off;
3987 off += dist_fields[i].size;
3988 }
3989
3990 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3991 return 0;
3992}
3993
/* Prune unused fields from the classification rule.
 * Used when masking is not supported.
 */
3997void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
3998{
3999 int off = 0, new_off = 0;
4000 int i, size;
4001
4002 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4003 size = dist_fields[i].size;
4004 if (dist_fields[i].id & fields) {
4005 memcpy(key_mem + new_off, key_mem + off, size);
4006 new_off += size;
4007 }
4008 off += size;
4009 }
4010}
4011
/* Set Rx distribution (hash or flow classification) key;
 * flags is a combination of RXH_ bits
 */
4015static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4016 enum dpaa2_eth_rx_dist type, u64 flags)
4017{
4018 struct device *dev = net_dev->dev.parent;
4019 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4020 struct dpkg_profile_cfg cls_cfg;
4021 u32 rx_hash_fields = 0;
4022 dma_addr_t key_iova;
4023 u8 *dma_mem;
4024 int i;
4025 int err = 0;
4026
4027 memset(&cls_cfg, 0, sizeof(cls_cfg));
4028
4029 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4030 struct dpkg_extract *key =
4031 &cls_cfg.extracts[cls_cfg.num_extracts];
4032
 /* For both Rx hashing and classification keys
  * we set only the selected fields.
  */
4036 if (!(flags & dist_fields[i].id))
4037 continue;
4038 if (type == DPAA2_ETH_RX_DIST_HASH)
4039 rx_hash_fields |= dist_fields[i].rxnfc_field;
4040
4041 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4042 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4043 return -E2BIG;
4044 }
4045
4046 key->type = DPKG_EXTRACT_FROM_HDR;
4047 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4048 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4049 key->extract.from_hdr.field = dist_fields[i].cls_field;
4050 cls_cfg.num_extracts++;
4051 }
4052
4053 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4054 if (!dma_mem)
4055 return -ENOMEM;
4056
4057 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4058 if (err) {
4059 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4060 goto free_key;
4061 }
4062
4063
4064 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4065 DMA_TO_DEVICE);
4066 if (dma_mapping_error(dev, key_iova)) {
4067 dev_err(dev, "DMA mapping failed\n");
4068 err = -ENOMEM;
4069 goto free_key;
4070 }
4071
4072 if (type == DPAA2_ETH_RX_DIST_HASH) {
4073 if (dpaa2_eth_has_legacy_dist(priv))
4074 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4075 else
4076 err = dpaa2_eth_config_hash_key(priv, key_iova);
4077 } else {
4078 err = dpaa2_eth_config_cls_key(priv, key_iova);
4079 }
4080
4081 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4082 DMA_TO_DEVICE);
4083 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4084 priv->rx_hash_fields = rx_hash_fields;
4085
4086free_key:
4087 kfree(dma_mem);
4088 return err;
4089}
4090
4091int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4092{
4093 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4094 u64 key = 0;
4095 int i;
4096
4097 if (!dpaa2_eth_hash_enabled(priv))
4098 return -EOPNOTSUPP;
4099
4100 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4101 if (dist_fields[i].rxnfc_field & flags)
4102 key |= dist_fields[i].id;
4103
4104 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4105}
4106
4107int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4108{
4109 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4110}
4111
4112static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4113{
4114 struct device *dev = priv->net_dev->dev.parent;
4115 int err;
4116
 /* Check if we actually support Rx flow classification */
4118 if (dpaa2_eth_has_legacy_dist(priv)) {
4119 dev_dbg(dev, "Rx cls not supported by current MC version\n");
4120 return -EOPNOTSUPP;
4121 }
4122
4123 if (!dpaa2_eth_fs_enabled(priv)) {
4124 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4125 return -EOPNOTSUPP;
4126 }
4127
4128 if (!dpaa2_eth_hash_enabled(priv)) {
4129 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4130 return -EOPNOTSUPP;
4131 }
4132
 /* If there is no support for masking in the classification table,
  * we don't set a default key, as it will depend on the rules
  * added by the user at runtime.
  */
4137 if (!dpaa2_eth_fs_mask_enabled(priv))
4138 goto out;
4139
4140 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4141 if (err)
4142 return err;
4143
4144out:
4145 priv->rx_cls_enabled = 1;
4146
4147 return 0;
4148}
4149
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
4153static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4154{
4155 struct net_device *net_dev = priv->net_dev;
4156 struct device *dev = net_dev->dev.parent;
4157 struct dpni_pools_cfg pools_params;
4158 struct dpni_error_cfg err_cfg;
4159 int err = 0;
4160 int i;
4161
4162 pools_params.num_dpbp = 1;
4163 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
4164 pools_params.pools[0].backup_pool = 0;
4165 pools_params.pools[0].buffer_size = priv->rx_buf_size;
4166 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4167 if (err) {
4168 dev_err(dev, "dpni_set_pools() failed\n");
4169 return err;
4170 }
4171
 /* Have the interface implicitly distribute traffic based on
  * the default hash key
  */
4175 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4176 if (err && err != -EOPNOTSUPP)
4177 dev_err(dev, "Failed to configure hashing\n");
4178
 /* Configure the flow classification key; it includes all
  * supported header fields and cannot be modified at runtime
  */
4182 err = dpaa2_eth_set_default_cls(priv);
4183 if (err && err != -EOPNOTSUPP)
4184 dev_err(dev, "Failed to configure Rx classification key\n");
4185
4186
4187 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4188 err_cfg.set_frame_annotation = 1;
4189 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4190 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4191 &err_cfg);
4192 if (err) {
4193 dev_err(dev, "dpni_set_errors_behavior failed\n");
4194 return err;
4195 }
4196
4197
4198 for (i = 0; i < priv->num_fqs; i++) {
4199 switch (priv->fq[i].type) {
4200 case DPAA2_RX_FQ:
4201 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4202 break;
4203 case DPAA2_TX_CONF_FQ:
4204 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4205 break;
4206 case DPAA2_RX_ERR_FQ:
4207 err = setup_rx_err_flow(priv, &priv->fq[i]);
4208 break;
4209 default:
4210 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4211 return -EINVAL;
4212 }
4213 if (err)
4214 return err;
4215 }
4216
4217 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4218 DPNI_QUEUE_TX, &priv->tx_qdid);
4219 if (err) {
4220 dev_err(dev, "dpni_get_qdid() failed\n");
4221 return err;
4222 }
4223
4224 return 0;
4225}
4226
4227
4228static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4229{
4230 struct net_device *net_dev = priv->net_dev;
4231 struct device *dev = net_dev->dev.parent;
4232 int i;
4233
4234 for (i = 0; i < priv->num_channels; i++) {
4235 priv->channel[i]->store =
4236 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4237 if (!priv->channel[i]->store) {
4238 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4239 goto err_ring;
4240 }
4241 }
4242
4243 return 0;
4244
4245err_ring:
4246 for (i = 0; i < priv->num_channels; i++) {
4247 if (!priv->channel[i]->store)
4248 break;
4249 dpaa2_io_store_destroy(priv->channel[i]->store);
4250 }
4251
4252 return -ENOMEM;
4253}
4254
4255static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4256{
4257 int i;
4258
4259 for (i = 0; i < priv->num_channels; i++)
4260 dpaa2_io_store_destroy(priv->channel[i]->store);
4261}
4262
4263static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4264{
4265 struct net_device *net_dev = priv->net_dev;
4266 struct device *dev = net_dev->dev.parent;
4267 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4268 int err;
4269
4270
4271 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4272 if (err) {
4273 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4274 return err;
4275 }
4276
4277
4278 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4279 dpni_mac_addr);
4280 if (err) {
4281 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4282 return err;
4283 }
4284
 /* First check if firmware has any address configured by bootloader */
4286 if (!is_zero_ether_addr(mac_addr)) {
4287
4288 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4289 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4290 priv->mc_token,
4291 mac_addr);
4292 if (err) {
4293 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4294 return err;
4295 }
4296 }
4297 eth_hw_addr_set(net_dev, mac_addr);
4298 } else if (is_zero_ether_addr(dpni_mac_addr)) {
 /* No MAC address configured, fill in net_dev->dev_addr
  * with a random one
  */
4302 eth_hw_addr_random(net_dev);
4303 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4304
4305 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4306 net_dev->dev_addr);
4307 if (err) {
4308 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4309 return err;
4310 }
4311
 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
  * practical purposes, this will be our "permanent" mac address,
  * at least until the next reboot. This move will also permit
  * register_netdevice() to properly fill up net_dev->perm_addr.
  */
4317 net_dev->addr_assign_type = NET_ADDR_PERM;
4318 } else {
 /* NET_ADDR_PERM is default, all we have to do is
  * fill in the device addr.
  */
4322 eth_hw_addr_set(net_dev, dpni_mac_addr);
4323 }
4324
4325 return 0;
4326}
4327
4328static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4329{
4330 struct device *dev = net_dev->dev.parent;
4331 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4332 u32 options = priv->dpni_attrs.options;
4333 u64 supported = 0, not_supported = 0;
4334 u8 bcast_addr[ETH_ALEN];
4335 u8 num_queues;
4336 int err;
4337
4338 net_dev->netdev_ops = &dpaa2_eth_ops;
4339 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4340
4341 err = dpaa2_eth_set_mac_addr(priv);
4342 if (err)
4343 return err;
4344
4345
4346 eth_broadcast_addr(bcast_addr);
4347 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4348 if (err) {
4349 dev_err(dev, "dpni_add_mac_addr() failed\n");
4350 return err;
4351 }
4352
4353
4354 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4355 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4356 DPAA2_ETH_MFL);
4357 if (err) {
4358 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4359 return err;
4360 }
4361
4362
4363 num_queues = dpaa2_eth_queue_count(priv);
4364 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4365 if (err) {
4366 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4367 return err;
4368 }
4369 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4370 if (err) {
4371 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4372 return err;
4373 }
4374
4375 dpaa2_eth_detect_features(priv);
4376
4377
4378 supported |= IFF_LIVE_ADDR_CHANGE;
4379
4380 if (options & DPNI_OPT_NO_MAC_FILTER)
4381 not_supported |= IFF_UNICAST_FLT;
4382 else
4383 supported |= IFF_UNICAST_FLT;
4384
4385 net_dev->priv_flags |= supported;
4386 net_dev->priv_flags &= ~not_supported;
4387
4388
4389 net_dev->features = NETIF_F_RXCSUM |
4390 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4391 NETIF_F_SG | NETIF_F_HIGHDMA |
4392 NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4393 net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4394 net_dev->hw_features = net_dev->features;
4395
4396 if (priv->dpni_attrs.vlan_filter_entries)
4397 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4398
4399 return 0;
4400}
4401
4402static int dpaa2_eth_poll_link_state(void *arg)
4403{
4404 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4405 int err;
4406
4407 while (!kthread_should_stop()) {
4408 err = dpaa2_eth_link_state_update(priv);
4409 if (unlikely(err))
4410 return err;
4411
4412 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4413 }
4414
4415 return 0;
4416}
4417
4418static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4419{
4420 struct fsl_mc_device *dpni_dev, *dpmac_dev;
4421 struct dpaa2_mac *mac;
4422 int err;
4423
4424 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4425 dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4426
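 /* The endpoint may not have been probed yet, in which case we defer */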
4427 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4428 return PTR_ERR(dpmac_dev);
4429
4430 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4431 return 0;
4432
4433 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4434 if (!mac)
4435 return -ENOMEM;
4436
4437 mac->mc_dev = dpmac_dev;
4438 mac->mc_io = priv->mc_io;
4439 mac->net_dev = priv->net_dev;
4440
4441 err = dpaa2_mac_open(mac);
4442 if (err)
4443 goto err_free_mac;
4444 priv->mac = mac;
4445
4446 if (dpaa2_eth_is_type_phy(priv)) {
4447 err = dpaa2_mac_connect(mac);
4448 if (err && err != -EPROBE_DEFER)
4449 netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
4450 ERR_PTR(err));
4451 if (err)
4452 goto err_close_mac;
4453 }
4454
4455 return 0;
4456
4457err_close_mac:
4458 dpaa2_mac_close(mac);
4459 priv->mac = NULL;
4460err_free_mac:
4461 kfree(mac);
4462 return err;
4463}
4464
4465static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4466{
4467 if (dpaa2_eth_is_type_phy(priv))
4468 dpaa2_mac_disconnect(priv->mac);
4469
4470 if (!dpaa2_eth_has_mac(priv))
4471 return;
4472
4473 dpaa2_mac_close(priv->mac);
4474 kfree(priv->mac);
4475 priv->mac = NULL;
4476}
4477
4478static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4479{
4480 u32 status = ~0;
4481 struct device *dev = (struct device *)arg;
4482 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4483 struct net_device *net_dev = dev_get_drvdata(dev);
4484 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4485 int err;
4486
4487 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4488 DPNI_IRQ_INDEX, &status);
4489 if (unlikely(err)) {
4490 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4491 return IRQ_HANDLED;
4492 }
4493
4494 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4495 dpaa2_eth_link_state_update(netdev_priv(net_dev));
4496
4497 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4498 dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4499 dpaa2_eth_update_tx_fqids(priv);
4500
4501 rtnl_lock();
4502 if (dpaa2_eth_has_mac(priv))
4503 dpaa2_eth_disconnect_mac(priv);
4504 else
4505 dpaa2_eth_connect_mac(priv);
4506 rtnl_unlock();
4507 }
4508
4509 return IRQ_HANDLED;
4510}
4511
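/* Allocate the MC interrupt, install the threaded handler and enable the
 * link-changed and endpoint-changed events on the DPNI object.
 */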
4512static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4513{
4514 int err = 0;
4515 struct fsl_mc_device_irq *irq;
4516
4517 err = fsl_mc_allocate_irqs(ls_dev);
4518 if (err) {
4519 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4520 return err;
4521 }
4522
4523 irq = ls_dev->irqs[0];
4524 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4525 NULL, dpni_irq0_handler_thread,
4526 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4527 dev_name(&ls_dev->dev), &ls_dev->dev);
4528 if (err < 0) {
4529 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4530 goto free_mc_irq;
4531 }
4532
4533 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4534 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4535 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4536 if (err < 0) {
4537 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4538 goto free_irq;
4539 }
4540
4541 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4542 DPNI_IRQ_INDEX, 1);
4543 if (err < 0) {
4544 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4545 goto free_irq;
4546 }
4547
4548 return 0;
4549
4550free_irq:
4551 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4552free_mc_irq:
4553 fsl_mc_free_irqs(ls_dev);
4554
4555 return err;
4556}
4557
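/* One NAPI instance per channel; all queues scheduled on a channel are
 * serviced by the same dpaa2_eth_poll() handler.
 */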
4558static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4559{
4560 int i;
4561 struct dpaa2_eth_channel *ch;
4562
4563 for (i = 0; i < priv->num_channels; i++) {
4564 ch = priv->channel[i];
4565
4566 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4567 NAPI_POLL_WEIGHT);
4568 }
4569}
4570
4571static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4572{
4573 int i;
4574 struct dpaa2_eth_channel *ch;
4575
4576 for (i = 0; i < priv->num_channels; i++) {
4577 ch = priv->channel[i];
4578 netif_napi_del(&ch->napi);
4579 }
4580}
4581
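/* Probe one DPNI object: allocate the net_device, set up the MC objects
 * (portal, DPNI, DPIO, DPBP), per-cpu data, interrupts and devlink, then
 * register the interface with the network stack.
 */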
4582static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4583{
4584 struct device *dev;
4585 struct net_device *net_dev = NULL;
4586 struct dpaa2_eth_priv *priv = NULL;
4587 int err = 0;
4588
4589 dev = &dpni_dev->dev;
4590
	/* Net device */
4592 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4593 if (!net_dev) {
4594 dev_err(dev, "alloc_etherdev_mq() failed\n");
4595 return -ENOMEM;
4596 }
4597
4598 SET_NETDEV_DEV(net_dev, dev);
4599 dev_set_drvdata(dev, net_dev);
4600
4601 priv = netdev_priv(net_dev);
4602 priv->net_dev = net_dev;
4603
4604 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4605
4606 priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4607 priv->rx_tstamp = false;
4608
4609 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4610 if (!priv->dpaa2_ptp_wq) {
4611 err = -ENOMEM;
4612 goto err_wq_alloc;
4613 }
4614
4615 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4616 mutex_init(&priv->onestep_tstamp_lock);
4617 skb_queue_head_init(&priv->tx_skbs);
4618
4619 priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4620
	/* Obtain a MC portal */
4622 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4623 &priv->mc_io);
4624 if (err) {
4625 if (err == -ENXIO)
4626 err = -EPROBE_DEFER;
4627 else
4628 dev_err(dev, "MC portal allocation failed\n");
4629 goto err_portal_alloc;
4630 }
4631
	/* MC objects initialization and configuration */
4633 err = dpaa2_eth_setup_dpni(dpni_dev);
4634 if (err)
4635 goto err_dpni_setup;
4636
4637 err = dpaa2_eth_setup_dpio(priv);
4638 if (err)
4639 goto err_dpio_setup;
4640
4641 dpaa2_eth_setup_fqs(priv);
4642
4643 err = dpaa2_eth_setup_dpbp(priv);
4644 if (err)
4645 goto err_dpbp_setup;
4646
4647 err = dpaa2_eth_bind_dpni(priv);
4648 if (err)
4649 goto err_bind;
4650
	/* Add a NAPI context for each channel */
4652 dpaa2_eth_add_ch_napi(priv);
4653
	/* Percpu statistics */
4655 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4656 if (!priv->percpu_stats) {
4657 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4658 err = -ENOMEM;
4659 goto err_alloc_percpu_stats;
4660 }
4661 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4662 if (!priv->percpu_extras) {
4663 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4664 err = -ENOMEM;
4665 goto err_alloc_percpu_extras;
4666 }
4667
4668 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4669 if (!priv->sgt_cache) {
4670 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4671 err = -ENOMEM;
4672 goto err_alloc_sgt_cache;
4673 }
4674
4675 priv->fd = alloc_percpu(*priv->fd);
4676 if (!priv->fd) {
4677 dev_err(dev, "alloc_percpu(fds) failed\n");
4678 err = -ENOMEM;
4679 goto err_alloc_fds;
4680 }
4681
4682 err = dpaa2_eth_netdev_init(net_dev);
4683 if (err)
4684 goto err_netdev_init;
4685
	/* Configure checksum offload based on current interface flags */
4687 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4688 if (err)
4689 goto err_csum;
4690
4691 err = dpaa2_eth_set_tx_csum(priv,
4692 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4693 if (err)
4694 goto err_csum;
4695
4696 err = dpaa2_eth_alloc_rings(priv);
4697 if (err)
4698 goto err_alloc_rings;
4699
4700#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4701 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4702 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4703 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4704 } else {
4705 dev_dbg(dev, "PFC not supported\n");
4706 }
4707#endif
4708
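	/* Use DPNI interrupts for link and endpoint events; if they cannot
	 * be set up, fall back to polling the link state from a kernel
	 * thread.
	 */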
4709 err = dpaa2_eth_setup_irqs(dpni_dev);
4710 if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4712 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4713 "%s_poll_link", net_dev->name);
4714 if (IS_ERR(priv->poll_thread)) {
4715 dev_err(dev, "Error starting polling thread\n");
4716 goto err_poll_thread;
4717 }
4718 priv->do_link_poll = true;
4719 }
4720
4721 err = dpaa2_eth_connect_mac(priv);
4722 if (err)
4723 goto err_connect_mac;
4724
4725 err = dpaa2_eth_dl_alloc(priv);
4726 if (err)
4727 goto err_dl_register;
4728
4729 err = dpaa2_eth_dl_traps_register(priv);
4730 if (err)
4731 goto err_dl_trap_register;
4732
4733 err = dpaa2_eth_dl_port_add(priv);
4734 if (err)
4735 goto err_dl_port_add;
4736
4737 err = register_netdev(net_dev);
4738 if (err < 0) {
4739 dev_err(dev, "register_netdev() failed\n");
4740 goto err_netdev_reg;
4741 }
4742
4743#ifdef CONFIG_DEBUG_FS
4744 dpaa2_dbg_add(priv);
4745#endif
4746
4747 dpaa2_eth_dl_register(priv);
4748 dev_info(dev, "Probed interface %s\n", net_dev->name);
4749 return 0;
4750
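/* Probe error path: unwind in the reverse order of the setup above */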
4751err_netdev_reg:
4752 dpaa2_eth_dl_port_del(priv);
4753err_dl_port_add:
4754 dpaa2_eth_dl_traps_unregister(priv);
4755err_dl_trap_register:
4756 dpaa2_eth_dl_free(priv);
4757err_dl_register:
4758 dpaa2_eth_disconnect_mac(priv);
4759err_connect_mac:
4760 if (priv->do_link_poll)
4761 kthread_stop(priv->poll_thread);
4762 else
4763 fsl_mc_free_irqs(dpni_dev);
4764err_poll_thread:
4765 dpaa2_eth_free_rings(priv);
4766err_alloc_rings:
4767err_csum:
4768err_netdev_init:
4769 free_percpu(priv->fd);
4770err_alloc_fds:
4771 free_percpu(priv->sgt_cache);
4772err_alloc_sgt_cache:
4773 free_percpu(priv->percpu_extras);
4774err_alloc_percpu_extras:
4775 free_percpu(priv->percpu_stats);
4776err_alloc_percpu_stats:
4777 dpaa2_eth_del_ch_napi(priv);
4778err_bind:
4779 dpaa2_eth_free_dpbp(priv);
4780err_dpbp_setup:
4781 dpaa2_eth_free_dpio(priv);
4782err_dpio_setup:
4783 dpaa2_eth_free_dpni(priv);
4784err_dpni_setup:
4785 fsl_mc_portal_free(priv->mc_io);
4786err_portal_alloc:
4787 destroy_workqueue(priv->dpaa2_ptp_wq);
4788err_wq_alloc:
4789 dev_set_drvdata(dev, NULL);
4790 free_netdev(net_dev);
4791
4792 return err;
4793}
4794
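/* Tear down the interface: unregister from the network stack and release
 * all resources in the reverse order of probe.
 */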
4795static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4796{
4797 struct device *dev;
4798 struct net_device *net_dev;
4799 struct dpaa2_eth_priv *priv;
4800
4801 dev = &ls_dev->dev;
4802 net_dev = dev_get_drvdata(dev);
4803 priv = netdev_priv(net_dev);
4804
4805 dpaa2_eth_dl_unregister(priv);
4806
4807#ifdef CONFIG_DEBUG_FS
4808 dpaa2_dbg_remove(priv);
4809#endif
4810
4811 unregister_netdev(net_dev);
4812 rtnl_lock();
4813 dpaa2_eth_disconnect_mac(priv);
4814 rtnl_unlock();
4815
4816 dpaa2_eth_dl_port_del(priv);
4817 dpaa2_eth_dl_traps_unregister(priv);
4818 dpaa2_eth_dl_free(priv);
4819
4820 if (priv->do_link_poll)
4821 kthread_stop(priv->poll_thread);
4822 else
4823 fsl_mc_free_irqs(ls_dev);
4824
4825 dpaa2_eth_free_rings(priv);
4826 free_percpu(priv->fd);
4827 free_percpu(priv->sgt_cache);
4828 free_percpu(priv->percpu_stats);
4829 free_percpu(priv->percpu_extras);
4830
4831 dpaa2_eth_del_ch_napi(priv);
4832 dpaa2_eth_free_dpbp(priv);
4833 dpaa2_eth_free_dpio(priv);
4834 dpaa2_eth_free_dpni(priv);
4835 if (priv->onestep_reg_base)
4836 iounmap(priv->onestep_reg_base);
4837
4838 fsl_mc_portal_free(priv->mc_io);
4839
4840 destroy_workqueue(priv->dpaa2_ptp_wq);
4841
4842 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4843
4844 free_netdev(net_dev);
4845
4846 return 0;
4847}
4848
4849static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4850 {
4851 .vendor = FSL_MC_VENDOR_FREESCALE,
4852 .obj_type = "dpni",
4853 },
4854 { .vendor = 0x0 }
4855};
4856MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4857
4858static struct fsl_mc_driver dpaa2_eth_driver = {
4859 .driver = {
4860 .name = KBUILD_MODNAME,
4861 .owner = THIS_MODULE,
4862 },
4863 .probe = dpaa2_eth_probe,
4864 .remove = dpaa2_eth_remove,
4865 .match_id_table = dpaa2_eth_match_id_table
4866};
4867
4868static int __init dpaa2_eth_driver_init(void)
4869{
4870 int err;
4871
4872 dpaa2_eth_dbg_init();
4873 err = fsl_mc_driver_register(&dpaa2_eth_driver);
4874 if (err) {
4875 dpaa2_eth_dbg_exit();
4876 return err;
4877 }
4878
4879 return 0;
4880}
4881
4882static void __exit dpaa2_eth_driver_exit(void)
4883{
4884 dpaa2_eth_dbg_exit();
4885 fsl_mc_driver_unregister(&dpaa2_eth_driver);
4886}
4887
4888module_init(dpaa2_eth_driver_init);
4889module_exit(dpaa2_eth_driver_exit);
4890