#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* Instantiate the tracepoints declared in dpaa_eth_trace.h exactly once,
 * in this translation unit.
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
	(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
	 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
	 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
	 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
	 FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
	(FM_FD_ERR_UNSUPPORTED_FORMAT | \
	 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Per-FQ tail-drop threshold, in bytes */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"), and the
 * larger the frame size, the more acute the problem. So we have to find a
 * balance between these factors:
 *	- avoiding the device staying congested for a prolonged time (risking
 *	  the netdev watchdog to fire - see also the tx_timeout module param);
 *	- affecting performance of protocols such as TCP, which otherwise
 *	  behave well under the congestion notification mechanism;
 *	- preventing the Tx cores from tightly-looping (as if the congestion
 *	  threshold was too low to be effective);
 *	- running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL	127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT	16

/* 256 bytes are reserved and DMA-mapped for the scatter/gather table */
#define DPAA_SGT_SIZE 256

/* Values for the L3R field of the FM Parse Results */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4	0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6	0x4000

/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP	0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP	0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV		0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV		0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT	128
#define FSL_DPAA_ETH_REFILL_THRESHOLD	80

#define DPAA_TX_PRIV_DATA_SIZE	16
#define DPAA_PARSE_RESULTS_SIZE	sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE	8
#define DPAA_HASH_RESULTS_SIZE	8
#define DPAA_RX_PRIV_DATA_SIZE	(u16)(DPAA_TX_PRIV_DATA_SIZE + \
				      dpaa_rx_extra_headroom)

/* Number of per-port Rx PCD (Parse-Classify-Distribute) frame queues */
#define DPAA_ETH_PCD_RXQ_NUM	128

#define DPAA_ENQUEUE_RETRIES	100000

enum port_type {RX, TX};

struct fm_port_fqs {
	struct dpaa_fq *tx_defq;
	struct dpaa_fq *tx_errq;
	struct dpaa_fq *rx_defq;
	struct dpaa_fq *rx_errq;
	struct dpaa_fq *rx_pcdq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

#define DPAA_BP_RAW_SIZE 4096
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
	size_t res = DPAA_BP_RAW_SIZE / 4;
	u8 i;

	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
		res *= 2;
	return res;
}

/* The usable part of a raw buffer: one cacheline is reserved for the
 * PTR_ALIGN() done when seeding the pools, and the skb_shared_info overhead
 * needed by build_skb() is subtracted from the end.
 */
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)

/* Maximum frame size configured on the FMan, read at module init time */
static int dpaa_max_frm;

/* Extra headroom required by the FMan on Rx, read at module init time */
static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu()	\
	(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
213
214static int dpaa_netdev_init(struct net_device *net_dev,
215 const struct net_device_ops *dpaa_ops,
216 u16 tx_timeout)
217{
218 struct dpaa_priv *priv = netdev_priv(net_dev);
219 struct device *dev = net_dev->dev.parent;
220 struct dpaa_percpu_priv *percpu_priv;
221 const u8 *mac_addr;
222 int i, err;
223
	/* Although we access another CPU's private data here
	 * we do it at initialization so it is safe
	 */
227 for_each_possible_cpu(i) {
228 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
229 percpu_priv->net_dev = net_dev;
230 }
231
232 net_dev->netdev_ops = dpaa_ops;
233 mac_addr = priv->mac_dev->addr;
234
235 net_dev->mem_start = priv->mac_dev->res->start;
236 net_dev->mem_end = priv->mac_dev->res->end;
237
238 net_dev->min_mtu = ETH_MIN_MTU;
239 net_dev->max_mtu = dpaa_get_max_mtu();
240
241 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
242 NETIF_F_LLTX | NETIF_F_RXHASH);
243
244 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;

	/* The kernel enables GSO automatically, if we declare NETIF_F_SG.
	 * For conformity, we'll still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
	net_dev->features |= NETIF_F_RXCSUM;
250
251 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
252
253 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
254
255 net_dev->features |= net_dev->hw_features;
256 net_dev->vlan_features = net_dev->features;
257
258 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
259 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
260
261 net_dev->ethtool_ops = &dpaa_ethtool_ops;
262
263 net_dev->needed_headroom = priv->tx_headroom;
264 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
265
266
267 netif_carrier_off(net_dev);
268
269 err = register_netdev(net_dev);
270 if (err < 0) {
271 dev_err(dev, "register_netdev() = %d\n", err);
272 return err;
273 }
274
275 return 0;
276}
277
278static int dpaa_stop(struct net_device *net_dev)
279{
280 struct mac_device *mac_dev;
281 struct dpaa_priv *priv;
282 int i, err, error;
283
284 priv = netdev_priv(net_dev);
285 mac_dev = priv->mac_dev;
286
287 netif_tx_stop_all_queues(net_dev);

	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
	usleep_range(5000, 10000);
292
293 err = mac_dev->stop(mac_dev);
294 if (err < 0)
295 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
296 err);
297
298 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
299 error = fman_port_disable(mac_dev->port[i]);
300 if (error)
301 err = error;
302 }
303
304 if (net_dev->phydev)
305 phy_disconnect(net_dev->phydev);
306 net_dev->phydev = NULL;
307
308 return err;
309}
310
311static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
312{
313 struct dpaa_percpu_priv *percpu_priv;
314 const struct dpaa_priv *priv;
315
316 priv = netdev_priv(net_dev);
317 percpu_priv = this_cpu_ptr(priv->percpu_priv);
318
319 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
320 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
321
322 percpu_priv->stats.tx_errors++;
323}
324
/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
328static void dpaa_get_stats64(struct net_device *net_dev,
329 struct rtnl_link_stats64 *s)
330{
331 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
332 struct dpaa_priv *priv = netdev_priv(net_dev);
333 struct dpaa_percpu_priv *percpu_priv;
334 u64 *netstats = (u64 *)s;
335 u64 *cpustats;
336 int i, j;
337
338 for_each_possible_cpu(i) {
339 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
340
341 cpustats = (u64 *)&percpu_priv->stats;
342
343
344 for (j = 0; j < numstats; j++)
345 netstats[j] += cpustats[j];
346 }
347}
348
349static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
350 void *type_data)
351{
352 struct dpaa_priv *priv = netdev_priv(net_dev);
353 struct tc_mqprio_qopt *mqprio = type_data;
354 u8 num_tc;
355 int i;
356
357 if (type != TC_SETUP_QDISC_MQPRIO)
358 return -EOPNOTSUPP;
359
360 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
361 num_tc = mqprio->num_tc;
362
363 if (num_tc == priv->num_tc)
364 return 0;
365
366 if (!num_tc) {
367 netdev_reset_tc(net_dev);
368 goto out;
369 }
370
371 if (num_tc > DPAA_TC_NUM) {
372 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
373 DPAA_TC_NUM);
374 return -EINVAL;
375 }
376
377 netdev_set_num_tc(net_dev, num_tc);
378
379 for (i = 0; i < num_tc; i++)
380 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
381 i * DPAA_TC_TXQ_NUM);
382
383out:
384 priv->num_tc = num_tc ? : 1;
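	/* Even with mqprio disabled, keep one (default) traffic class */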
385 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
386 return 0;
387}
388
389static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
390{
391 struct dpaa_eth_data *eth_data;
392 struct device *dpaa_dev;
393 struct mac_device *mac_dev;
394
395 dpaa_dev = &pdev->dev;
396 eth_data = dpaa_dev->platform_data;
397 if (!eth_data) {
398 dev_err(dpaa_dev, "eth_data missing\n");
399 return ERR_PTR(-ENODEV);
400 }
401 mac_dev = eth_data->mac_dev;
402 if (!mac_dev) {
403 dev_err(dpaa_dev, "mac_dev missing\n");
404 return ERR_PTR(-EINVAL);
405 }
406
407 return mac_dev;
408}
409
410static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
411{
412 const struct dpaa_priv *priv;
413 struct mac_device *mac_dev;
414 struct sockaddr old_addr;
415 int err;
416
417 priv = netdev_priv(net_dev);
418
419 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
420
421 err = eth_mac_addr(net_dev, addr);
422 if (err < 0) {
423 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
424 return err;
425 }
426
427 mac_dev = priv->mac_dev;
428
429 err = mac_dev->change_addr(mac_dev->fman_mac,
430 (enet_addr_t *)net_dev->dev_addr);
431 if (err < 0) {
432 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
433 err);
434
435 eth_mac_addr(net_dev, &old_addr);
436
437 return err;
438 }
439
440 return 0;
441}
442
443static void dpaa_set_rx_mode(struct net_device *net_dev)
444{
445 const struct dpaa_priv *priv;
446 int err;
447
448 priv = netdev_priv(net_dev);
449
450 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
451 priv->mac_dev->promisc = !priv->mac_dev->promisc;
452 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
453 priv->mac_dev->promisc);
454 if (err < 0)
455 netif_err(priv, drv, net_dev,
456 "mac_dev->set_promisc() = %d\n",
457 err);
458 }
459
460 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
461 priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
462 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
463 priv->mac_dev->allmulti);
464 if (err < 0)
465 netif_err(priv, drv, net_dev,
466 "mac_dev->set_allmulti() = %d\n",
467 err);
468 }
469
470 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
471 if (err < 0)
472 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
473 err);
474}
475
476static struct dpaa_bp *dpaa_bpid2pool(int bpid)
477{
478 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
479 return NULL;
480
481 return dpaa_bp_array[bpid];
482}
483
/* checks if this bpool is already allocated */
485static bool dpaa_bpid2pool_use(int bpid)
486{
487 if (dpaa_bpid2pool(bpid)) {
488 atomic_inc(&dpaa_bp_array[bpid]->refs);
489 return true;
490 }
491
492 return false;
493}
494
/* called only once per bpid by dpaa_bp_alloc_pool() */
496static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
497{
498 dpaa_bp_array[bpid] = dpaa_bp;
499 atomic_set(&dpaa_bp->refs, 1);
500}
501
502static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
503{
504 int err;
505
506 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
507 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
508 __func__);
509 return -EINVAL;
510 }
511
	/* If the pool is already specified, we only create one per bpid */
513 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
514 dpaa_bpid2pool_use(dpaa_bp->bpid))
515 return 0;
516
517 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
518 dpaa_bp->pool = bman_new_pool();
519 if (!dpaa_bp->pool) {
520 pr_err("%s: bman_new_pool() failed\n",
521 __func__);
522 return -ENODEV;
523 }
524
525 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
526 }
527
528 if (dpaa_bp->seed_cb) {
529 err = dpaa_bp->seed_cb(dpaa_bp);
530 if (err)
531 goto pool_seed_failed;
532 }
533
534 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
535
536 return 0;
537
538pool_seed_failed:
539 pr_err("%s: pool seeding failed\n", __func__);
540 bman_free_pool(dpaa_bp->pool);
541
542 return err;
543}
544
545
546static void dpaa_bp_drain(struct dpaa_bp *bp)
547{
548 u8 num = 8;
549 int ret;
550
551 do {
552 struct bm_buffer bmb[8];
553 int i;
554
555 ret = bman_acquire(bp->pool, bmb, num);
556 if (ret < 0) {
557 if (num == 8) {
				/* we have less than 8 buffers left;
				 * drain them one by one
				 */
561 num = 1;
562 ret = 1;
563 continue;
564 } else {
				/* Pool is fully drained */
566 break;
567 }
568 }
569
570 if (bp->free_buf_cb)
571 for (i = 0; i < num; i++)
572 bp->free_buf_cb(bp, &bmb[i]);
573 } while (ret > 0);
574}
575
576static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
577{
578 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
579
	/* The mapping between bpid and dpaa_bp is done very late in the
	 * allocation procedure; if something failed before the mapping,
	 * the bp was not configured, so there is nothing to drain or free.
	 */
584 if (!bp)
585 return;
586
587 if (!atomic_dec_and_test(&bp->refs))
588 return;
589
590 if (bp->free_buf_cb)
591 dpaa_bp_drain(bp);
592
593 dpaa_bp_array[bp->bpid] = NULL;
594 bman_free_pool(bp->pool);
595}
596
597static void dpaa_bps_free(struct dpaa_priv *priv)
598{
599 int i;
600
601 for (i = 0; i < DPAA_BPS_NUM; i++)
602 dpaa_bp_free(priv->dpaa_bps[i]);
603}

/* Use multiple WQs for FQ assignment:
 *	- Tx Confirmation queues go to WQ1.
 *	- Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *	  to be scheduled, in case there are many more FQs in WQ6).
 *	- Rx Default and Rx PCD queues go to WQ6.
 *	- Tx queues go to different WQs depending on their priority. Equal
 *	  chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *	  WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are released timely, avoiding
 * congestion on the Tx Confirm FQs, which would otherwise be greatly
 * outnumbered by the other FQs while dequeue scheduling is round-robin.
 */
618static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
619{
620 switch (fq->fq_type) {
621 case FQ_TYPE_TX_CONFIRM:
622 case FQ_TYPE_TX_CONF_MQ:
623 fq->wq = 1;
624 break;
625 case FQ_TYPE_RX_ERROR:
626 case FQ_TYPE_TX_ERROR:
627 fq->wq = 5;
628 break;
629 case FQ_TYPE_RX_DEFAULT:
630 case FQ_TYPE_RX_PCD:
631 fq->wq = 6;
632 break;
633 case FQ_TYPE_TX:
634 switch (idx / DPAA_TC_TXQ_NUM) {
635 case 0:
636
637 fq->wq = 6;
638 break;
639 case 1:
640
641 fq->wq = 2;
642 break;
643 case 2:
644
645 fq->wq = 1;
646 break;
647 case 3:
648
649 fq->wq = 0;
650 break;
651 default:
652 WARN(1, "Too many TX FQs: more than %d!\n",
653 DPAA_ETH_TXQ_NUM);
654 }
655 break;
656 default:
657 WARN(1, "Invalid FQ type %d for FQID %d!\n",
658 fq->fq_type, fq->fqid);
659 }
660}
661
662static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
663 u32 start, u32 count,
664 struct list_head *list,
665 enum dpaa_fq_type fq_type)
666{
667 struct dpaa_fq *dpaa_fq;
668 int i;
669
670 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
671 GFP_KERNEL);
672 if (!dpaa_fq)
673 return NULL;
674
675 for (i = 0; i < count; i++) {
676 dpaa_fq[i].fq_type = fq_type;
677 dpaa_fq[i].fqid = start ? start + i : 0;
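		/* A fqid of 0 requests a dynamically allocated FQID; see the
		 * QMAN_FQ_FLAG_DYNAMIC_FQID handling in dpaa_fq_init().
		 */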
678 list_add_tail(&dpaa_fq[i].list, list);
679 }
680
681 for (i = 0; i < count; i++)
682 dpaa_assign_wq(dpaa_fq + i, i);
683
684 return dpaa_fq;
685}
686
687static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
688 struct fm_port_fqs *port_fqs)
689{
690 struct dpaa_fq *dpaa_fq;
691 u32 fq_base, fq_base_aligned, i;
692
693 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
694 if (!dpaa_fq)
695 goto fq_alloc_failed;
696
697 port_fqs->rx_errq = &dpaa_fq[0];
698
699 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
700 if (!dpaa_fq)
701 goto fq_alloc_failed;
702
703 port_fqs->rx_defq = &dpaa_fq[0];

	/* The PCD FQID range must be naturally aligned to its size; allocate
	 * twice as many FQIDs and release the ones outside the aligned window.
	 */
706 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
707 goto fq_alloc_failed;
708
709 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
710
711 for (i = fq_base; i < fq_base_aligned; i++)
712 qman_release_fqid(i);
713
714 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
715 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
716 qman_release_fqid(i);
717
718 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
719 list, FQ_TYPE_RX_PCD);
720 if (!dpaa_fq)
721 goto fq_alloc_failed;
722
723 port_fqs->rx_pcdq = &dpaa_fq[0];
724
725 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
726 goto fq_alloc_failed;
727
728 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
729 if (!dpaa_fq)
730 goto fq_alloc_failed;
731
732 port_fqs->tx_errq = &dpaa_fq[0];
733
734 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
735 if (!dpaa_fq)
736 goto fq_alloc_failed;
737
738 port_fqs->tx_defq = &dpaa_fq[0];
739
740 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
741 goto fq_alloc_failed;
742
743 return 0;
744
745fq_alloc_failed:
746 dev_err(dev, "dpaa_fq_alloc() failed\n");
747 return -ENOMEM;
748}
749
750static u32 rx_pool_channel;
751static DEFINE_SPINLOCK(rx_pool_channel_init);
752
753static int dpaa_get_channel(void)
754{
755 spin_lock(&rx_pool_channel_init);
756 if (!rx_pool_channel) {
757 u32 pool;
758 int ret;
759
760 ret = qman_alloc_pool(&pool);
761
762 if (!ret)
763 rx_pool_channel = pool;
764 }
765 spin_unlock(&rx_pool_channel_init);
766 if (!rx_pool_channel)
767 return -ENOMEM;
768 return rx_pool_channel;
769}
770
771static void dpaa_release_channel(void)
772{
773 qman_release_pool(rx_pool_channel);
774}
775
776static void dpaa_eth_add_channel(u16 channel)
777{
778 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
779 const cpumask_t *cpus = qman_affine_cpus();
780 struct qman_portal *portal;
781 int cpu;
782
783 for_each_cpu(cpu, cpus) {
784 portal = qman_get_affine_portal(cpu);
785 qman_p_static_dequeue_add(portal, pool);
786 }
787}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them up again upon exiting the congested state.
 * Also maintains some CGR-related statistics.
 */
794static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
795 int congested)
796{
797 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
798 struct dpaa_priv, cgr_data.cgr);
799
800 if (congested) {
801 priv->cgr_data.congestion_start_jiffies = jiffies;
802 netif_tx_stop_all_queues(priv->net_dev);
803 priv->cgr_data.cgr_congested_count++;
804 } else {
805 priv->cgr_data.congested_jiffies +=
806 (jiffies - priv->cgr_data.congestion_start_jiffies);
807 netif_tx_wake_all_queues(priv->net_dev);
808 }
809}
810
811static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
812{
813 struct qm_mcc_initcgr initcgr;
814 u32 cs_th;
815 int err;
816
817 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
818 if (err < 0) {
819 if (netif_msg_drv(priv))
820 pr_err("%s: Error %d allocating CGR ID\n",
821 __func__, err);
822 goto out_error;
823 }
824 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
825
826
827 memset(&initcgr, 0, sizeof(initcgr));
828 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
829 initcgr.cgr.cscn_en = QM_CGR_EN;
	/* Set different thresholds based on the configured MAC speed.
	 * This may prove suboptimal if the MAC later renegotiates a lower
	 * speed; ideally the threshold would then be reconfigured as well.
	 */
836 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
837 cs_th = DPAA_CS_THRESHOLD_10G;
838 else
839 cs_th = DPAA_CS_THRESHOLD_1G;
840 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
841
842 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
843 initcgr.cgr.cstd_en = QM_CGR_EN;
844
845 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
846 &initcgr);
847 if (err < 0) {
848 if (netif_msg_drv(priv))
849 pr_err("%s: Error %d creating CGR with ID %d\n",
850 __func__, err, priv->cgr_data.cgr.cgrid);
851 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
852 goto out_error;
853 }
854 if (netif_msg_drv(priv))
855 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
856 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
857 priv->cgr_data.cgr.chan);
858
859out_error:
860 return err;
861}
862
863static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
864 struct dpaa_fq *fq,
865 const struct qman_fq *template)
866{
867 fq->fq_base = *template;
868 fq->net_dev = priv->net_dev;
869
870 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
871 fq->channel = priv->channel;
872}
873
874static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
875 struct dpaa_fq *fq,
876 struct fman_port *port,
877 const struct qman_fq *template)
878{
879 fq->fq_base = *template;
880 fq->net_dev = priv->net_dev;
881
882 if (port) {
883 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
884 fq->channel = (u16)fman_port_get_qman_channel_id(port);
885 } else {
886 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
887 }
888}
889
890static void dpaa_fq_setup(struct dpaa_priv *priv,
891 const struct dpaa_fq_cbs *fq_cbs,
892 struct fman_port *tx_port)
893{
894 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
895 const cpumask_t *affine_cpus = qman_affine_cpus();
896 u16 channels[NR_CPUS];
897 struct dpaa_fq *fq;
898
899 for_each_cpu(cpu, affine_cpus)
900 channels[num_portals++] = qman_affine_channel(cpu);
901
902 if (num_portals == 0)
903 dev_err(priv->net_dev->dev.parent,
904 "No Qman software (affine) channels found");
905
906
907 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
908 switch (fq->fq_type) {
909 case FQ_TYPE_RX_DEFAULT:
910 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
911 break;
912 case FQ_TYPE_RX_ERROR:
913 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
914 break;
915 case FQ_TYPE_RX_PCD:
916 if (!num_portals)
917 continue;
918 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
919 fq->channel = channels[portal_cnt++ % num_portals];
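			/* Distribute the PCD FQs round-robin over the
			 * channels of the affine (per-CPU) QMan portals.
			 */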
920 break;
921 case FQ_TYPE_TX:
922 dpaa_setup_egress(priv, fq, tx_port,
923 &fq_cbs->egress_ern);
			/* If we have more Tx queues than the number of cores,
			 * just ignore the extra ones.
			 */
927 if (egress_cnt < DPAA_ETH_TXQ_NUM)
928 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
929 break;
		case FQ_TYPE_TX_CONF_MQ:
			priv->conf_fqs[conf_cnt++] = &fq->fq_base;
			/* fall through */
		case FQ_TYPE_TX_CONFIRM:
934 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
935 break;
936 case FQ_TYPE_TX_ERROR:
937 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
938 break;
939 default:
940 dev_warn(priv->net_dev->dev.parent,
941 "Unknown FQ type detected!\n");
942 break;
943 }
944 }
945
	/* Make sure all CPUs receive a corresponding Tx queue. */
947 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
948 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
949 if (fq->fq_type != FQ_TYPE_TX)
950 continue;
951 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
952 if (egress_cnt == DPAA_ETH_TXQ_NUM)
953 break;
954 }
955 }
956}
957
958static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
959 struct qman_fq *tx_fq)
960{
961 int i;
962
963 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
964 if (priv->egress_fqs[i] == tx_fq)
965 return i;
966
967 return -EINVAL;
968}
969
970static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
971{
972 const struct dpaa_priv *priv;
973 struct qman_fq *confq = NULL;
974 struct qm_mcc_initfq initfq;
975 struct device *dev;
976 struct qman_fq *fq;
977 int queue_id;
978 int err;
979
980 priv = netdev_priv(dpaa_fq->net_dev);
981 dev = dpaa_fq->net_dev->dev.parent;
982
983 if (dpaa_fq->fqid == 0)
984 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
985
986 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
987
988 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
989 if (err) {
990 dev_err(dev, "qman_create_fq() failed\n");
991 return err;
992 }
993 fq = &dpaa_fq->fq_base;
994
995 if (dpaa_fq->init) {
996 memset(&initfq, 0, sizeof(initfq));
997
998 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
999
1000 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1001
1002
1003
1004
1005 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1006 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1007
1008
1009 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1010
1011 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1012
1013
1014
1015
1016
1017
1018
1019 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1020 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1021 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1022 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1023 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1024 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1035 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1036 qm_fqd_set_oal(&initfq.fqd,
1037 min(sizeof(struct sk_buff) +
1038 priv->tx_headroom,
1039 (size_t)FSL_QMAN_MAX_OAL));
1040 }
1041
1042 if (td_enable) {
1043 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1044 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1045 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1046 }
1047
1048 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1049 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1050 if (queue_id >= 0)
1051 confq = priv->conf_fqs[queue_id];
1052 if (confq) {
1053 initfq.we_mask |=
1054 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1055
1056
1057
1058
1059
1060
1061
1062 qm_fqd_context_a_set64(&initfq.fqd,
1063 0x1e00000080000000ULL);
1064 }
1065 }
1066
1067
1068 if (priv->use_ingress_cgr &&
1069 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1070 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1071 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1072 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1073 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1074 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1075
1076
1077
1078 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1079 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1080 qm_fqd_set_oal(&initfq.fqd,
1081 min(sizeof(struct sk_buff) +
1082 priv->tx_headroom,
1083 (size_t)FSL_QMAN_MAX_OAL));
1084 }
1085
1086
1087 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1088 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1089 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1090 QM_FQCTRL_CTXASTASHING);
1091 initfq.fqd.context_a.stashing.exclusive =
1092 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1093 QM_STASHING_EXCL_ANNOTATION;
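			/* Stash one cache line of annotation, two lines of
			 * frame data and enough context lines to cover
			 * struct qman_fq.
			 */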
1094 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1095 DIV_ROUND_UP(sizeof(struct qman_fq),
1096 64));
1097 }
1098
1099 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1100 if (err < 0) {
1101 dev_err(dev, "qman_init_fq(%u) = %d\n",
1102 qman_fq_fqid(fq), err);
1103 qman_destroy_fq(fq);
1104 return err;
1105 }
1106 }
1107
1108 dpaa_fq->fqid = qman_fq_fqid(fq);
1109
1110 return 0;
1111}
1112
1113static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1114{
1115 const struct dpaa_priv *priv;
1116 struct dpaa_fq *dpaa_fq;
1117 int err, error;
1118
1119 err = 0;
1120
1121 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1122 priv = netdev_priv(dpaa_fq->net_dev);
1123
1124 if (dpaa_fq->init) {
1125 err = qman_retire_fq(fq, NULL);
1126 if (err < 0 && netif_msg_drv(priv))
1127 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1128 qman_fq_fqid(fq), err);
1129
1130 error = qman_oos_fq(fq);
1131 if (error < 0 && netif_msg_drv(priv)) {
1132 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1133 qman_fq_fqid(fq), error);
1134 if (err >= 0)
1135 err = error;
1136 }
1137 }
1138
1139 qman_destroy_fq(fq);
1140 list_del(&dpaa_fq->list);
1141
1142 return err;
1143}
1144
1145static int dpaa_fq_free(struct device *dev, struct list_head *list)
1146{
1147 struct dpaa_fq *dpaa_fq, *tmp;
1148 int err, error;
1149
1150 err = 0;
1151 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1152 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1153 if (error < 0 && err >= 0)
1154 err = error;
1155 }
1156
1157 return err;
1158}
1159
1160static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1161 struct dpaa_fq *defq,
1162 struct dpaa_buffer_layout *buf_layout)
1163{
1164 struct fman_buffer_prefix_content buf_prefix_content;
1165 struct fman_port_params params;
1166 int err;
1167
	memset(&params, 0, sizeof(params));
1169 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1170
1171 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1172 buf_prefix_content.pass_prs_result = true;
1173 buf_prefix_content.pass_hash_result = true;
1174 buf_prefix_content.pass_time_stamp = false;
1175 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1176
1177 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1178 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1179
	err = fman_port_config(port, &params);
1181 if (err) {
1182 pr_err("%s: fman_port_config failed\n", __func__);
1183 return err;
1184 }
1185
1186 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1187 if (err) {
1188 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1189 __func__);
1190 return err;
1191 }
1192
1193 err = fman_port_init(port);
1194 if (err)
1195 pr_err("%s: fm_port_init failed\n", __func__);
1196
1197 return err;
1198}
1199
1200static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1201 size_t count, struct dpaa_fq *errq,
1202 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1203 struct dpaa_buffer_layout *buf_layout)
1204{
1205 struct fman_buffer_prefix_content buf_prefix_content;
1206 struct fman_port_rx_params *rx_p;
1207 struct fman_port_params params;
1208 int i, err;
1209
	memset(&params, 0, sizeof(params));
1211 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1212
1213 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1214 buf_prefix_content.pass_prs_result = true;
1215 buf_prefix_content.pass_hash_result = true;
1216 buf_prefix_content.pass_time_stamp = false;
1217 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1218
	rx_p = &params.specific_params.rx_params;
1220 rx_p->err_fqid = errq->fqid;
1221 rx_p->dflt_fqid = defq->fqid;
1222 if (pcdq) {
1223 rx_p->pcd_base_fqid = pcdq->fqid;
1224 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1225 }
1226
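	/* Use at most as many buffer pools as the FMan Rx port supports */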
1227 count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
1228 rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
1229 for (i = 0; i < count; i++) {
1230 rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
1231 rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
1232 }
1233
	err = fman_port_config(port, &params);
1235 if (err) {
1236 pr_err("%s: fman_port_config failed\n", __func__);
1237 return err;
1238 }
1239
1240 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1241 if (err) {
1242 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1243 __func__);
1244 return err;
1245 }
1246
1247 err = fman_port_init(port);
1248 if (err)
1249 pr_err("%s: fm_port_init failed\n", __func__);
1250
1251 return err;
1252}
1253
1254static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1255 struct dpaa_bp **bps, size_t count,
1256 struct fm_port_fqs *port_fqs,
1257 struct dpaa_buffer_layout *buf_layout,
1258 struct device *dev)
1259{
1260 struct fman_port *rxport = mac_dev->port[RX];
1261 struct fman_port *txport = mac_dev->port[TX];
1262 int err;
1263
1264 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1265 port_fqs->tx_defq, &buf_layout[TX]);
1266 if (err)
1267 return err;
1268
1269 err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1270 port_fqs->rx_defq, port_fqs->rx_pcdq,
1271 &buf_layout[RX]);
1272
1273 return err;
1274}
1275
1276static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1277 struct bm_buffer *bmb, int cnt)
1278{
1279 int err;
1280
1281 err = bman_release(dpaa_bp->pool, bmb, cnt);
1282
1283 if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
1284 while (cnt-- > 0)
1285 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1286
1287 return cnt;
1288}
1289
1290static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1291{
1292 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1293 struct dpaa_bp *dpaa_bp;
1294 int i = 0, j;
1295
1296 memset(bmb, 0, sizeof(bmb));
1297
1298 do {
1299 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1300 if (!dpaa_bp)
1301 return;
1302
1303 j = 0;
1304 do {
1305 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1306
1307 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1308
1309 j++; i++;
1310 } while (j < ARRAY_SIZE(bmb) &&
1311 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1312 sgt[i - 1].bpid == sgt[i].bpid);
1313
1314 dpaa_bman_release(dpaa_bp, bmb, j);
1315 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1316}
1317
1318static void dpaa_fd_release(const struct net_device *net_dev,
1319 const struct qm_fd *fd)
1320{
1321 struct qm_sg_entry *sgt;
1322 struct dpaa_bp *dpaa_bp;
1323 struct bm_buffer bmb;
1324 dma_addr_t addr;
1325 void *vaddr;
1326
1327 bmb.data = 0;
1328 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1329
1330 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1331 if (!dpaa_bp)
1332 return;
1333
1334 if (qm_fd_get_format(fd) == qm_fd_sg) {
1335 vaddr = phys_to_virt(qm_fd_addr(fd));
1336 sgt = vaddr + qm_fd_get_offset(fd);
1337
1338 dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1339 DMA_FROM_DEVICE);
1340
1341 dpaa_release_sgt_members(sgt);
1342
1343 addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1344 DMA_FROM_DEVICE);
1345 if (dma_mapping_error(dpaa_bp->dev, addr)) {
1346 dev_err(dpaa_bp->dev, "DMA mapping failed");
1347 return;
1348 }
1349 bm_buffer_set64(&bmb, addr);
1350 }
1351
1352 dpaa_bman_release(dpaa_bp, &bmb, 1);
1353}
1354
1355static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1356 const union qm_mr_entry *msg)
1357{
1358 switch (msg->ern.rc & QM_MR_RC_MASK) {
1359 case QM_MR_RC_CGR_TAILDROP:
1360 percpu_priv->ern_cnt.cg_tdrop++;
1361 break;
1362 case QM_MR_RC_WRED:
1363 percpu_priv->ern_cnt.wred++;
1364 break;
1365 case QM_MR_RC_ERROR:
1366 percpu_priv->ern_cnt.err_cond++;
1367 break;
1368 case QM_MR_RC_ORPWINDOW_EARLY:
1369 percpu_priv->ern_cnt.early_window++;
1370 break;
1371 case QM_MR_RC_ORPWINDOW_LATE:
1372 percpu_priv->ern_cnt.late_window++;
1373 break;
1374 case QM_MR_RC_FQ_TAILDROP:
1375 percpu_priv->ern_cnt.fq_tdrop++;
1376 break;
1377 case QM_MR_RC_ORPWINDOW_RETIRED:
1378 percpu_priv->ern_cnt.fq_retired++;
1379 break;
1380 case QM_MR_RC_ORP_ZERO:
1381 percpu_priv->ern_cnt.orp_zero++;
1382 break;
1383 }
1384}
1385
/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data
 * buffer (the Parse Results area).
 */
1396static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1397 struct sk_buff *skb,
1398 struct qm_fd *fd,
1399 char *parse_results)
1400{
1401 struct fman_prs_result *parse_result;
1402 u16 ethertype = ntohs(skb->protocol);
1403 struct ipv6hdr *ipv6h = NULL;
1404 struct iphdr *iph;
1405 int retval = 0;
1406 u8 l4_proto;
1407
1408 if (skb->ip_summed != CHECKSUM_PARTIAL)
1409 return 0;
1410
1411
1412
1413
1414
1415
1416
1417
1418 parse_result = (struct fman_prs_result *)parse_results;
1419
1420
1421 if (ethertype == ETH_P_8021Q) {
1422
1423
1424
1425 skb_reset_mac_header(skb);
1426 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1427 }
1428
1429
1430
1431
1432 switch (ethertype) {
1433 case ETH_P_IP:
1434 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1435 iph = ip_hdr(skb);
1436 WARN_ON(!iph);
1437 l4_proto = iph->protocol;
1438 break;
1439 case ETH_P_IPV6:
1440 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1441 ipv6h = ipv6_hdr(skb);
1442 WARN_ON(!ipv6h);
1443 l4_proto = ipv6h->nexthdr;
1444 break;
1445 default:
1446
1447 if (net_ratelimit())
1448 netif_alert(priv, tx_err, priv->net_dev,
1449 "Can't compute HW csum for L3 proto 0x%x\n",
1450 ntohs(skb->protocol));
1451 retval = -EIO;
1452 goto return_error;
1453 }
1454
1455
1456 switch (l4_proto) {
1457 case IPPROTO_UDP:
1458 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1459 break;
1460 case IPPROTO_TCP:
1461 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1462 break;
1463 default:
1464 if (net_ratelimit())
1465 netif_alert(priv, tx_err, priv->net_dev,
1466 "Can't compute HW csum for L4 proto 0x%x\n",
1467 l4_proto);
1468 retval = -EIO;
1469 goto return_error;
1470 }
1471
1472
1473 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1474 parse_result->l4_off = (u8)skb_transport_offset(skb);
1475
1476
1477 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1478
1479
1480
1481
1482
1483
1484
1485return_error:
1486 return retval;
1487}
1488
1489static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1490{
1491 struct device *dev = dpaa_bp->dev;
1492 struct bm_buffer bmb[8];
1493 dma_addr_t addr;
1494 void *new_buf;
1495 u8 i;
1496
1497 for (i = 0; i < 8; i++) {
1498 new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
1499 if (unlikely(!new_buf)) {
1500 dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1501 dpaa_bp->raw_size);
1502 goto release_previous_buffs;
1503 }
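		/* Align the buffer start so the address given to BMan/FMan
		 * is cacheline aligned (see also dpaa_bp_size()).
		 */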
1504 new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
1505
1506 addr = dma_map_single(dev, new_buf,
1507 dpaa_bp->size, DMA_FROM_DEVICE);
1508 if (unlikely(dma_mapping_error(dev, addr))) {
1509 dev_err(dpaa_bp->dev, "DMA map failed");
1510 goto release_previous_buffs;
1511 }
1512
1513 bmb[i].data = 0;
1514 bm_buffer_set64(&bmb[i], addr);
1515 }
1516
1517release_bufs:
1518 return dpaa_bman_release(dpaa_bp, bmb, i);
1519
1520release_previous_buffs:
1521 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1522
1523 bm_buffer_set64(&bmb[i], 0);
1524
1525
1526
1527 if (likely(i))
1528 goto release_bufs;
1529
1530 return 0;
1531}
1532
1533static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1534{
1535 int i;
1536
	/* Give each CPU an allotment of "config_count" buffers */
1538 for_each_possible_cpu(i) {
1539 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1540 int j;
1541
		/* Although we access another CPU's counters here
		 * we do it at boot time so it is safe
		 */
1545 for (j = 0; j < dpaa_bp->config_count; j += 8)
1546 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1547 }
1548 return 0;
1549}
1550
/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
1554static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1555{
1556 int count = *countptr;
1557 int new_bufs;
1558
1559 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1560 do {
1561 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1562 if (unlikely(!new_bufs)) {
1563
1564
1565
1566
1567 break;
1568 }
1569 count += new_bufs;
1570 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1571
1572 *countptr = count;
1573 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1574 return -ENOMEM;
1575 }
1576
1577 return 0;
1578}
1579
1580static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1581{
1582 struct dpaa_bp *dpaa_bp;
1583 int *countptr;
1584 int res, i;
1585
1586 for (i = 0; i < DPAA_BPS_NUM; i++) {
1587 dpaa_bp = priv->dpaa_bps[i];
1588 if (!dpaa_bp)
1589 return -EINVAL;
1590 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1591 res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
1592 if (res)
1593 return res;
1594 }
1595 return 0;
1596}
1597
/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
1608static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1609 const struct qm_fd *fd)
1610{
1611 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1612 struct device *dev = priv->net_dev->dev.parent;
1613 dma_addr_t addr = qm_fd_addr(fd);
1614 const struct qm_sg_entry *sgt;
1615 struct sk_buff **skbh, *skb;
1616 int nr_frags, i;
1617
1618 skbh = (struct sk_buff **)phys_to_virt(addr);
1619 skb = *skbh;
1620
1621 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1622 nr_frags = skb_shinfo(skb)->nr_frags;
1623 dma_unmap_single(dev, addr,
1624 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1625 dma_dir);
1626
1627
1628
1629
1630 sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
1631
1632
1633 dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1634 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1635
1636
1637 for (i = 1; i < nr_frags; i++) {
1638 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1639
1640 dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1641 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1642 }
1643
1644
1645 skb_free_frag(phys_to_virt(addr));
1646 } else {
1647 dma_unmap_single(dev, addr,
1648 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1649 }
1650
1651 return skb;
1652}
1653
1654static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1655{
1656
1657
1658
1659
1660 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1661 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1662 return CHECKSUM_UNNECESSARY;
1663
1664
1665
1666
1667
1668 return CHECKSUM_NONE;
1669}
1670
/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
1675static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1676 const struct qm_fd *fd)
1677{
1678 ssize_t fd_off = qm_fd_get_offset(fd);
1679 dma_addr_t addr = qm_fd_addr(fd);
1680 struct dpaa_bp *dpaa_bp;
1681 struct sk_buff *skb;
1682 void *vaddr;
1683
1684 vaddr = phys_to_virt(addr);
1685 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1686
1687 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1688 if (!dpaa_bp)
1689 goto free_buffer;
1690
1691 skb = build_skb(vaddr, dpaa_bp->size +
1692 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1693 if (unlikely(!skb)) {
1694 WARN_ONCE(1, "Build skb failure on Rx\n");
1695 goto free_buffer;
1696 }
1697 WARN_ON(fd_off != priv->rx_headroom);
1698 skb_reserve(skb, fd_off);
1699 skb_put(skb, qm_fd_get_length(fd));
1700
1701 skb->ip_summed = rx_csum_offload(priv, fd);
1702
1703 return skb;
1704
1705free_buffer:
1706 skb_free_frag(vaddr);
1707 return NULL;
1708}
1709
/* Build a full skb around the received buffer: the first S/G entry becomes
 * the linear part and the remaining entries become skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
1715static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1716 const struct qm_fd *fd)
1717{
1718 ssize_t fd_off = qm_fd_get_offset(fd);
1719 dma_addr_t addr = qm_fd_addr(fd);
1720 const struct qm_sg_entry *sgt;
1721 struct page *page, *head_page;
1722 struct dpaa_bp *dpaa_bp;
1723 void *vaddr, *sg_vaddr;
1724 int frag_off, frag_len;
1725 struct sk_buff *skb;
1726 dma_addr_t sg_addr;
1727 int page_offset;
1728 unsigned int sz;
1729 int *count_ptr;
1730 int i;
1731
1732 vaddr = phys_to_virt(addr);
1733 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1734
1735
1736 sgt = vaddr + fd_off;
1737 skb = NULL;
1738 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1739
1740 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1741
1742 sg_addr = qm_sg_addr(&sgt[i]);
1743 sg_vaddr = phys_to_virt(sg_addr);
1744 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
1745 SMP_CACHE_BYTES));
1746
1747
1748 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1749 if (!dpaa_bp)
1750 goto free_buffers;
1751
1752 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1753 dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1754 DMA_FROM_DEVICE);
1755 if (!skb) {
1756 sz = dpaa_bp->size +
1757 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1758 skb = build_skb(sg_vaddr, sz);
1759 if (WARN_ON(unlikely(!skb)))
1760 goto free_buffers;
1761
1762 skb->ip_summed = rx_csum_offload(priv, fd);
1763
1764
1765
1766
1767 WARN_ON(fd_off != priv->rx_headroom);
1768 skb_reserve(skb, fd_off);
1769 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1770 } else {
1771
1772
1773
1774
1775
1776
1777
1778 page = virt_to_page(sg_vaddr);
1779 head_page = virt_to_head_page(sg_vaddr);
1780
1781
1782 page_offset = ((unsigned long)sg_vaddr &
1783 (PAGE_SIZE - 1)) +
1784 (page_address(page) - page_address(head_page));
1785
1786
1787
1788 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1789 frag_len = qm_sg_entry_get_len(&sgt[i]);
1790
1791
1792
1793
1794 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1795 frag_len, dpaa_bp->size);
1796 }
1797
1798 (*count_ptr)--;
1799
1800 if (qm_sg_entry_is_final(&sgt[i]))
1801 break;
1802 }
1803 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1804
1805
1806 skb_free_frag(vaddr);
1807
1808 return skb;
1809
1810free_buffers:
1811
1812 for (i--; i >= 0; i--) {
1813 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1814 if (dpaa_bp) {
1815 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1816 (*count_ptr)++;
1817 }
1818 }
1819
1820 for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
1821 sg_addr = qm_sg_addr(&sgt[i]);
1822 sg_vaddr = phys_to_virt(sg_addr);
1823 skb_free_frag(sg_vaddr);
1824 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1825 if (dpaa_bp) {
1826 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1827 (*count_ptr)--;
1828 }
1829
1830 if (qm_sg_entry_is_final(&sgt[i]))
1831 break;
1832 }
1833
1834 skb_free_frag(vaddr);
1835
1836 return NULL;
1837}
1838
1839static int skb_to_contig_fd(struct dpaa_priv *priv,
1840 struct sk_buff *skb, struct qm_fd *fd,
1841 int *offset)
1842{
1843 struct net_device *net_dev = priv->net_dev;
1844 struct device *dev = net_dev->dev.parent;
1845 enum dma_data_direction dma_dir;
1846 unsigned char *buffer_start;
1847 struct sk_buff **skbh;
1848 dma_addr_t addr;
1849 int err;
1850
	/* We are guaranteed to have at least tx_headroom bytes
	 * available, so just use that for offset.
	 */
1854 fd->bpid = FSL_DPAA_BPID_INV;
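	/* An invalid BPID marks this Tx buffer as not coming from a BMan
	 * pool; it is freed on the Tx confirmation path instead.
	 */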
1855 buffer_start = skb->data - priv->tx_headroom;
1856 dma_dir = DMA_TO_DEVICE;
1857
1858 skbh = (struct sk_buff **)buffer_start;
1859 *skbh = skb;
1860
1861
1862
1863
1864
1865
1866 err = dpaa_enable_tx_csum(priv, skb, fd,
1867 ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
1868 if (unlikely(err < 0)) {
1869 if (net_ratelimit())
1870 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1871 err);
1872 return err;
1873 }
1874
1875
1876 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1877 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1878
1879
1880 addr = dma_map_single(dev, skbh,
1881 skb_tail_pointer(skb) - buffer_start, dma_dir);
1882 if (unlikely(dma_mapping_error(dev, addr))) {
1883 if (net_ratelimit())
1884 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1885 return -EINVAL;
1886 }
1887 qm_fd_addr_set64(fd, addr);
1888
1889 return 0;
1890}
1891
1892static int skb_to_sg_fd(struct dpaa_priv *priv,
1893 struct sk_buff *skb, struct qm_fd *fd)
1894{
1895 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1896 const int nr_frags = skb_shinfo(skb)->nr_frags;
1897 struct net_device *net_dev = priv->net_dev;
1898 struct device *dev = net_dev->dev.parent;
1899 struct qm_sg_entry *sgt;
1900 struct sk_buff **skbh;
1901 int i, j, err, sz;
1902 void *buffer_start;
1903 skb_frag_t *frag;
1904 dma_addr_t addr;
1905 size_t frag_len;
1906 void *sgt_buf;
1907
1908
1909 sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
1910 sgt_buf = netdev_alloc_frag(sz);
1911 if (unlikely(!sgt_buf)) {
1912 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
1913 sz);
1914 return -ENOMEM;
1915 }
1916
1917
1918
1919
1920
1921
1922 err = dpaa_enable_tx_csum(priv, skb, fd,
1923 sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
1924 if (unlikely(err < 0)) {
1925 if (net_ratelimit())
1926 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1927 err);
1928 goto csum_failed;
1929 }
1930
1931
1932 sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
1933 frag_len = skb_headlen(skb);
1934 qm_sg_entry_set_len(&sgt[0], frag_len);
1935 sgt[0].bpid = FSL_DPAA_BPID_INV;
1936 sgt[0].offset = 0;
1937 addr = dma_map_single(dev, skb->data,
1938 skb_headlen(skb), dma_dir);
1939 if (unlikely(dma_mapping_error(dev, addr))) {
1940 dev_err(dev, "DMA mapping failed");
1941 err = -EINVAL;
1942 goto sg0_map_failed;
1943 }
1944 qm_sg_entry_set64(&sgt[0], addr);
1945
1946
1947 for (i = 0; i < nr_frags; i++) {
1948 frag = &skb_shinfo(skb)->frags[i];
		frag_len = skb_frag_size(frag);
1950 WARN_ON(!skb_frag_page(frag));
1951 addr = skb_frag_dma_map(dev, frag, 0,
1952 frag_len, dma_dir);
1953 if (unlikely(dma_mapping_error(dev, addr))) {
1954 dev_err(dev, "DMA mapping failed");
1955 err = -EINVAL;
1956 goto sg_map_failed;
1957 }
1958
1959 qm_sg_entry_set_len(&sgt[i + 1], frag_len);
1960 sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
1961 sgt[i + 1].offset = 0;
1962
1963
1964 qm_sg_entry_set64(&sgt[i + 1], addr);
1965 }
1966
1967
1968 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
1969
1970 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
1971
1972
1973 buffer_start = (void *)sgt - priv->tx_headroom;
1974 skbh = (struct sk_buff **)buffer_start;
1975 *skbh = skb;
1976
1977 addr = dma_map_single(dev, buffer_start,
1978 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
1979 if (unlikely(dma_mapping_error(dev, addr))) {
1980 dev_err(dev, "DMA mapping failed");
1981 err = -EINVAL;
1982 goto sgt_map_failed;
1983 }
1984
1985 fd->bpid = FSL_DPAA_BPID_INV;
1986 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1987 qm_fd_addr_set64(fd, addr);
1988
1989 return 0;
1990
1991sgt_map_failed:
1992sg_map_failed:
1993 for (j = 0; j < i; j++)
1994 dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
1995 qm_sg_entry_get_len(&sgt[j]), dma_dir);
1996sg0_map_failed:
1997csum_failed:
1998 skb_free_frag(sgt_buf);
1999
2000 return err;
2001}
2002
2003static inline int dpaa_xmit(struct dpaa_priv *priv,
2004 struct rtnl_link_stats64 *percpu_stats,
2005 int queue,
2006 struct qm_fd *fd)
2007{
2008 struct qman_fq *egress_fq;
2009 int err, i;
2010
2011 egress_fq = priv->egress_fqs[queue];
2012 if (fd->bpid == FSL_DPAA_BPID_INV)
2013 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2014
2015
2016 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2017
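	/* The portal's enqueue ring may be temporarily full; retry for a
	 * bounded number of times before counting a Tx FIFO error.
	 */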
2018 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2019 err = qman_enqueue(egress_fq, fd);
2020 if (err != -EBUSY)
2021 break;
2022 }
2023
2024 if (unlikely(err < 0)) {
2025 percpu_stats->tx_fifo_errors++;
2026 return err;
2027 }
2028
2029 percpu_stats->tx_packets++;
2030 percpu_stats->tx_bytes += qm_fd_get_length(fd);
2031
2032 return 0;
2033}
2034
2035static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2036{
2037 const int queue_mapping = skb_get_queue_mapping(skb);
2038 bool nonlinear = skb_is_nonlinear(skb);
2039 struct rtnl_link_stats64 *percpu_stats;
2040 struct dpaa_percpu_priv *percpu_priv;
2041 struct dpaa_priv *priv;
2042 struct qm_fd fd;
2043 int offset = 0;
2044 int err = 0;
2045
2046 priv = netdev_priv(net_dev);
2047 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2048 percpu_stats = &percpu_priv->stats;
2049
2050 qm_fd_clear_fd(&fd);
2051
2052 if (!nonlinear) {
		/* We're going to store the skb backpointer at the beginning
		 * of the data buffer, so we need a privately owned skb.
		 *
		 * We've made sure skb is not shared in dev->priv_flags,
		 * we need to verify the skb head is not cloned.
		 */
2059 if (skb_cow_head(skb, priv->tx_headroom))
2060 goto enomem;
2061
2062 WARN_ON(skb_is_nonlinear(skb));
2063 }
2064
2065
2066
2067
2068 if (unlikely(nonlinear &&
2069 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2070
2071
2072
2073 if (__skb_linearize(skb))
2074 goto enomem;
2075
2076 nonlinear = skb_is_nonlinear(skb);
2077 }
2078
2079 if (nonlinear) {
2080
2081 err = skb_to_sg_fd(priv, skb, &fd);
2082 percpu_priv->tx_frag_skbuffs++;
2083 } else {
2084
2085 err = skb_to_contig_fd(priv, skb, &fd, &offset);
2086 }
2087 if (unlikely(err < 0))
2088 goto skb_to_fd_failed;
2089
2090 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2091 return NETDEV_TX_OK;
2092
2093 dpaa_cleanup_tx_fd(priv, &fd);
2094skb_to_fd_failed:
2095enomem:
2096 percpu_stats->tx_errors++;
2097 dev_kfree_skb(skb);
2098 return NETDEV_TX_OK;
2099}
2100
2101static void dpaa_rx_error(struct net_device *net_dev,
2102 const struct dpaa_priv *priv,
2103 struct dpaa_percpu_priv *percpu_priv,
2104 const struct qm_fd *fd,
2105 u32 fqid)
2106{
2107 if (net_ratelimit())
2108 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2109 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2110
2111 percpu_priv->stats.rx_errors++;
2112
2113 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2114 percpu_priv->rx_errors.dme++;
2115 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2116 percpu_priv->rx_errors.fpe++;
2117 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2118 percpu_priv->rx_errors.fse++;
2119 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2120 percpu_priv->rx_errors.phe++;
2121
2122 dpaa_fd_release(net_dev, fd);
2123}
2124
2125static void dpaa_tx_error(struct net_device *net_dev,
2126 const struct dpaa_priv *priv,
2127 struct dpaa_percpu_priv *percpu_priv,
2128 const struct qm_fd *fd,
2129 u32 fqid)
2130{
2131 struct sk_buff *skb;
2132
2133 if (net_ratelimit())
2134 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2135 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2136
2137 percpu_priv->stats.tx_errors++;
2138
2139 skb = dpaa_cleanup_tx_fd(priv, fd);
2140 dev_kfree_skb(skb);
2141}
2142
2143static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2144{
2145 struct dpaa_napi_portal *np =
2146 container_of(napi, struct dpaa_napi_portal, napi);
2147
2148 int cleaned = qman_p_poll_dqrr(np->p, budget);
2149
2150 if (cleaned < budget) {
2151 napi_complete_done(napi, cleaned);
2152 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2153
2154 } else if (np->down) {
2155 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2156 }
2157
2158 return cleaned;
2159}
2160
2161static void dpaa_tx_conf(struct net_device *net_dev,
2162 const struct dpaa_priv *priv,
2163 struct dpaa_percpu_priv *percpu_priv,
2164 const struct qm_fd *fd,
2165 u32 fqid)
2166{
2167 struct sk_buff *skb;
2168
2169 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2170 if (net_ratelimit())
2171 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2172 be32_to_cpu(fd->status) &
2173 FM_FD_STAT_TX_ERRORS);
2174
2175 percpu_priv->stats.tx_errors++;
2176 }
2177
2178 percpu_priv->tx_confirm++;
2179
2180 skb = dpaa_cleanup_tx_fd(priv, fd);
2181
2182 consume_skb(skb);
2183}
2184
2185static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2186 struct qman_portal *portal)
2187{
2188 if (unlikely(in_irq() || !in_serving_softirq())) {
2189
2190 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2191
2192 percpu_priv->np.p = portal;
2193 napi_schedule(&percpu_priv->np.napi);
2194 percpu_priv->in_interrupt++;
2195 return 1;
2196 }
2197 return 0;
2198}
2199
2200static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2201 struct qman_fq *fq,
2202 const struct qm_dqrr_entry *dq)
2203{
2204 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2205 struct dpaa_percpu_priv *percpu_priv;
2206 struct net_device *net_dev;
2207 struct dpaa_bp *dpaa_bp;
2208 struct dpaa_priv *priv;
2209
2210 net_dev = dpaa_fq->net_dev;
2211 priv = netdev_priv(net_dev);
2212 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2213 if (!dpaa_bp)
2214 return qman_cb_dqrr_consume;
2215
2216 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2217
2218 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2219 return qman_cb_dqrr_stop;
2220
2221 dpaa_eth_refill_bpools(priv);
2222 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2223
2224 return qman_cb_dqrr_consume;
2225}
2226
2227static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2228 struct qman_fq *fq,
2229 const struct qm_dqrr_entry *dq)
2230{
2231 struct rtnl_link_stats64 *percpu_stats;
2232 struct dpaa_percpu_priv *percpu_priv;
2233 const struct qm_fd *fd = &dq->fd;
2234 dma_addr_t addr = qm_fd_addr(fd);
2235 enum qm_fd_format fd_format;
2236 struct net_device *net_dev;
2237 u32 fd_status, hash_offset;
2238 struct dpaa_bp *dpaa_bp;
2239 struct dpaa_priv *priv;
2240 unsigned int skb_len;
2241 struct sk_buff *skb;
2242 int *count_ptr;
2243 void *vaddr;
2244
2245 fd_status = be32_to_cpu(fd->status);
2246 fd_format = qm_fd_get_format(fd);
2247 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2248 priv = netdev_priv(net_dev);
2249 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2250 if (!dpaa_bp)
2251 return qman_cb_dqrr_consume;
2252
2253
2254 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2255
2256 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2257 percpu_stats = &percpu_priv->stats;
2258
2259 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
2260 return qman_cb_dqrr_stop;
2261
2262
2263 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2264
2265
2266
2267
2268 dpaa_fd_release(net_dev, &dq->fd);
2269 return qman_cb_dqrr_consume;
2270 }
2271
	if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
2273 if (net_ratelimit())
2274 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2275 fd_status & FM_FD_STAT_RX_ERRORS);
2276
2277 percpu_stats->rx_errors++;
2278 dpaa_fd_release(net_dev, fd);
2279 return qman_cb_dqrr_consume;
2280 }
2281
2282 dpaa_bp = dpaa_bpid2pool(fd->bpid);
2283 if (!dpaa_bp)
2284 return qman_cb_dqrr_consume;
2285
2286 dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2287
2288
2289 vaddr = phys_to_virt(addr);
2290 prefetch(vaddr + qm_fd_get_offset(fd));
2291
2292
2293 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2294
2295
2296
2297
2298 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2299 (*count_ptr)--;
2300
2301 if (likely(fd_format == qm_fd_contig))
2302 skb = contig_fd_to_skb(priv, fd);
2303 else
2304 skb = sg_fd_to_skb(priv, fd);
2305 if (!skb)
2306 return qman_cb_dqrr_consume;
2307
2308 skb->protocol = eth_type_trans(skb, net_dev);
2309
2310 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2311 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2312 &hash_offset)) {
2313 enum pkt_hash_types type;
2314
2315
2316 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2317 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2318 skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
2319 type);
2320 }
2321
2322 skb_len = skb->len;
2323
2324 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2325 percpu_stats->rx_dropped++;
2326 return qman_cb_dqrr_consume;
2327 }
2328
2329 percpu_stats->rx_packets++;
2330 percpu_stats->rx_bytes += skb_len;
2331
2332 return qman_cb_dqrr_consume;
2333}
2334
2335static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2336 struct qman_fq *fq,
2337 const struct qm_dqrr_entry *dq)
2338{
2339 struct dpaa_percpu_priv *percpu_priv;
2340 struct net_device *net_dev;
2341 struct dpaa_priv *priv;
2342
2343 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2344 priv = netdev_priv(net_dev);
2345
2346 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2347
2348 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2349 return qman_cb_dqrr_stop;
2350
2351 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2352
2353 return qman_cb_dqrr_consume;
2354}
2355
2356static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2357 struct qman_fq *fq,
2358 const struct qm_dqrr_entry *dq)
2359{
2360 struct dpaa_percpu_priv *percpu_priv;
2361 struct net_device *net_dev;
2362 struct dpaa_priv *priv;
2363
2364 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2365 priv = netdev_priv(net_dev);
2366
2367
2368 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2369
2370 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2371
2372 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2373 return qman_cb_dqrr_stop;
2374
2375 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2376
2377 return qman_cb_dqrr_consume;
2378}
2379
static void egress_ern(struct qman_portal *portal,
                       struct qman_fq *fq,
                       const union qm_mr_entry *msg)
{
        const struct qm_fd *fd = &msg->ern.fd;
        struct dpaa_percpu_priv *percpu_priv;
        const struct dpaa_priv *priv;
        struct net_device *net_dev;
        struct sk_buff *skb;

        net_dev = ((struct dpaa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);
        percpu_priv = this_cpu_ptr(priv->percpu_priv);

        percpu_priv->stats.tx_dropped++;
        percpu_priv->stats.tx_fifo_errors++;
        count_ern(percpu_priv, msg);

        skb = dpaa_cleanup_tx_fd(priv, fd);
        dev_kfree_skb_any(skb);
}

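/* Per-FQ-type callbacks, wired into the frame queues by dpaa_fq_setup():
 * the Rx default and error queues get the rx_*_dqrr handlers, the Tx
 * confirmation and Tx error queues get the conf_*_dqrr handlers, and
 * rejected egress frames are handled by egress_ern.
 */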
static const struct dpaa_fq_cbs dpaa_fq_cbs = {
        .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
        .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
        .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
        .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
        .egress_ern = { .cb = { .ern = egress_ern } }
};

static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
        struct dpaa_percpu_priv *percpu_priv;
        int i;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                percpu_priv->np.down = 0;
                napi_enable(&percpu_priv->np.napi);
        }
}

static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
        struct dpaa_percpu_priv *percpu_priv;
        int i;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                percpu_priv->np.down = 1;
                napi_disable(&percpu_priv->np.napi);
        }
}

static void dpaa_adjust_link(struct net_device *net_dev)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;
        mac_dev->adjust_link(mac_dev);
}

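/* Connect to the PHY described by the MAC's device tree node and restrict
 * the advertised link modes to what the MAC itself supports
 * (mac_dev->if_support); asymmetric pause is allowed.
 */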
static int dpaa_phy_init(struct net_device *net_dev)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
        struct mac_device *mac_dev;
        struct phy_device *phy_dev;
        struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
                                 &dpaa_adjust_link, 0,
                                 mac_dev->phy_if);
        if (!phy_dev) {
                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
                return -ENODEV;
        }

        /* Remove any features not supported by the controller */
        ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
        linkmode_and(phy_dev->supported, phy_dev->supported, mask);

        phy_support_asym_pause(phy_dev);

        mac_dev->phy_dev = phy_dev;
        net_dev->phydev = phy_dev;

        return 0;
}

static int dpaa_open(struct net_device *net_dev)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;
        int err, i;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;
        dpaa_eth_napi_enable(priv);

        err = dpaa_phy_init(net_dev);
        if (err)
                goto phy_init_failed;

        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
                err = fman_port_enable(mac_dev->port[i]);
                if (err)
                        goto mac_start_failed;
        }

        err = priv->mac_dev->start(mac_dev);
        if (err < 0) {
                netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
                goto mac_start_failed;
        }

        netif_tx_start_all_queues(net_dev);

        return 0;

mac_start_failed:
        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
                fman_port_disable(mac_dev->port[i]);

phy_init_failed:
        dpaa_eth_napi_disable(priv);

        return err;
}

static int dpaa_eth_stop(struct net_device *net_dev)
{
        struct dpaa_priv *priv;
        int err;

        err = dpaa_stop(net_dev);

        priv = netdev_priv(net_dev);
        dpaa_eth_napi_disable(priv);

        return err;
}

static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
        if (!net_dev->phydev)
                return -EINVAL;
        return phy_mii_ioctl(net_dev->phydev, rq, cmd);
}

static const struct net_device_ops dpaa_ops = {
        .ndo_open = dpaa_open,
        .ndo_start_xmit = dpaa_start_xmit,
        .ndo_stop = dpaa_eth_stop,
        .ndo_tx_timeout = dpaa_tx_timeout,
        .ndo_get_stats64 = dpaa_get_stats64,
        .ndo_set_mac_address = dpaa_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = dpaa_set_rx_mode,
        .ndo_do_ioctl = dpaa_ioctl,
        .ndo_setup_tc = dpaa_setup_tc,
};

static int dpaa_napi_add(struct net_device *net_dev)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct dpaa_percpu_priv *percpu_priv;
        int cpu;

        for_each_possible_cpu(cpu) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

                netif_napi_add(net_dev, &percpu_priv->np.napi,
                               dpaa_eth_poll, NAPI_POLL_WEIGHT);
        }

        return 0;
}

static void dpaa_napi_del(struct net_device *net_dev)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct dpaa_percpu_priv *percpu_priv;
        int cpu;

        for_each_possible_cpu(cpu) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

                netif_napi_del(&percpu_priv->np.napi);
        }
}

static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
                                   struct bm_buffer *bmb)
{
        dma_addr_t addr = bm_buf_addr(bmb);

        dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);

        skb_free_frag(phys_to_virt(addr));
}

/* Alloc the dpaa_bp struct and configure default values */
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
        struct dpaa_bp *dpaa_bp;

        dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
        if (!dpaa_bp)
                return ERR_PTR(-ENOMEM);

        dpaa_bp->bpid = FSL_DPAA_BPID_INV;
        dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
        if (!dpaa_bp->percpu_count)
                return ERR_PTR(-ENOMEM);

        dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

        dpaa_bp->seed_cb = dpaa_bp_seed;
        dpaa_bp->free_buf_cb = dpaa_bp_free_pf;

        return dpaa_bp;
}

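/* Set up the ingress Congestion Group Record (CGR) that all of this
 * interface's Rx frame queues will be placed into. It is configured for
 * congestion-state tail drop at DPAA_INGRESS_CS_THRESHOLD bytes, so QMan
 * starts discarding ingress frames once the group holds more data than that.
 * Note that QM_CGR_WE_CSCN_EN is not part of we_mask below, so the cscn_en
 * assignment should not actually be written to hardware; the intent is tail
 * drop without congestion state change notifications.
 */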
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
        struct qm_mcc_initcgr initcgr;
        u32 cs_th;
        int err;

        err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("Error %d allocating CGR ID\n", err);
                goto out_error;
        }

        /* Enable CS TD, but disable Congestion State Change Notifications */
        memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;
        cs_th = DPAA_INGRESS_CS_THRESHOLD;
        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
        initcgr.cgr.cstd_en = QM_CGR_EN;

        /* This CGR will be associated with the SWP affined to the current
         * CPU. However, we'll place all our ingress FQs in it.
         */
        err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
                              &initcgr);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("Error %d creating ingress CGR with ID %d\n",
                               err, priv->ingress_cgr.cgrid);
                qman_release_cgrid(priv->ingress_cgr.cgrid);
                goto out_error;
        }
        if (netif_msg_drv(priv))
                pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
                         priv->ingress_cgr.cgrid, priv->mac_dev->addr);

        priv->use_ingress_cgr = true;

out_error:
        return err;
}

static const struct of_device_id dpaa_match[];

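/* Compute the required frame headroom: the driver's private data area plus
 * the FMan parse results, timestamp and hash results that precede the frame
 * data, rounded up to DPAA_FD_DATA_ALIGNMENT. As a purely illustrative
 * example (the actual parse-results size is sizeof(struct fman_prs_result)):
 * a 16-byte private area, 32-byte parse results, 8-byte timestamp and 8-byte
 * hash results sum to 64 bytes, which is already a multiple of the 16-byte
 * alignment and is returned unchanged.
 */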
static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
        u16 headroom;

        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
                         DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);

        return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
                                              DPAA_FD_DATA_ALIGNMENT) :
                                              headroom;
}

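/* Probe one DPAA Ethernet interface: set the DMA mask, allocate the netdev
 * and its buffer pools, allocate and initialize the frame queues and
 * congestion groups, configure the FMan ports, then register the netdev.
 * Resources are torn down in reverse order on any failure.
 */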
static int dpaa_eth_probe(struct platform_device *pdev)
{
        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
        struct net_device *net_dev = NULL;
        struct dpaa_fq *dpaa_fq, *tmp;
        struct dpaa_priv *priv = NULL;
        struct fm_port_fqs port_fqs;
        struct mac_device *mac_dev;
        int err = 0, i, channel;
        struct device *dev;

        /* device used for DMA mapping */
        dev = pdev->dev.parent;
        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
        if (err) {
                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
                return err;
        }

        /* Allocate this early, so we can store relevant information in
         * the private area
         */
        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
        if (!net_dev) {
                dev_err(dev, "alloc_etherdev_mq() failed\n");
                return -ENOMEM;
        }

        /* Do this here, so we can be verbose early */
        SET_NETDEV_DEV(net_dev, dev);
        dev_set_drvdata(dev, net_dev);

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;

        priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);

        mac_dev = dpaa_mac_dev_get(pdev);
        if (IS_ERR(mac_dev)) {
                dev_err(dev, "dpaa_mac_dev_get() failed\n");
                err = PTR_ERR(mac_dev);
                goto free_netdev;
        }

        /* Start with a conservative MTU: no larger than the standard 1500
         * bytes, even if the FMan maximum frame length would allow more.
         * The user can raise it explicitly afterwards.
         */
        net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);

        netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
                   net_dev->mtu);

        priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE;
        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE;

        /* Buffer pool init: one pool per supported buffer size */
        for (i = 0; i < DPAA_BPS_NUM; i++) {
                dpaa_bps[i] = dpaa_bp_alloc(dev);
                if (IS_ERR(dpaa_bps[i])) {
                        err = PTR_ERR(dpaa_bps[i]);
                        goto free_dpaa_bps;
                }
                /* The raw size of the buffers used for reception */
                dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
                /* Avoid runtime computations by caching the usable size */
                dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
                dpaa_bps[i]->dev = dev;

                err = dpaa_bp_alloc_pool(dpaa_bps[i]);
                if (err < 0)
                        goto free_dpaa_bps;
                priv->dpaa_bps[i] = dpaa_bps[i];
        }

        INIT_LIST_HEAD(&priv->dpaa_fq_list);

        memset(&port_fqs, 0, sizeof(port_fqs));

        err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
        if (err < 0) {
                dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
                goto free_dpaa_bps;
        }

        priv->mac_dev = mac_dev;

        channel = dpaa_get_channel();
        if (channel < 0) {
                dev_err(dev, "dpaa_get_channel() failed\n");
                err = channel;
                goto free_dpaa_bps;
        }

        priv->channel = (u16)channel;

        /* Walk the CPUs with affine portals and add this pool channel to
         * each one's dequeue mask.
         */
        dpaa_eth_add_channel(priv->channel);

        dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

        /* Create a congestion group for this netdev, with a
         * dynamically-allocated CGR ID. This must be executed after probing
         * the MAC, but before assigning the egress FQs to the CGRs.
         */
        err = dpaa_eth_cgr_init(priv);
        if (err < 0) {
                dev_err(dev, "Error initializing CGR\n");
                goto free_dpaa_bps;
        }

        err = dpaa_ingress_cgr_init(priv);
        if (err < 0) {
                dev_err(dev, "Error initializing ingress CGR\n");
                goto delete_egress_cgr;
        }

        /* Add the FQs to the interface, and make them active */
        list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
                err = dpaa_fq_init(dpaa_fq, false);
                if (err < 0)
                        goto free_dpaa_fqs;
        }

        priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
        priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

        /* All real interfaces need their ports initialized */
        err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
                                  &priv->buf_layout[0], dev);
        if (err)
                goto free_dpaa_fqs;

        /* Rx traffic distribution based on keygen hashing defaults to on */
        priv->keygen_in_use = true;

        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
        if (!priv->percpu_priv) {
                dev_err(dev, "devm_alloc_percpu() failed\n");
                err = -ENOMEM;
                goto free_dpaa_fqs;
        }

        priv->num_tc = 1;
        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);

        /* Initialize NAPI */
        err = dpaa_napi_add(net_dev);
        if (err < 0)
                goto delete_dpaa_napi;

        err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
        if (err < 0)
                goto delete_dpaa_napi;

        dpaa_eth_sysfs_init(&net_dev->dev);

        netif_info(priv, probe, net_dev, "Probed interface %s\n",
                   net_dev->name);

        return 0;

delete_dpaa_napi:
        dpaa_napi_del(net_dev);
free_dpaa_fqs:
        dpaa_fq_free(dev, &priv->dpaa_fq_list);
        qman_delete_cgr_safe(&priv->ingress_cgr);
        qman_release_cgrid(priv->ingress_cgr.cgrid);
delete_egress_cgr:
        qman_delete_cgr_safe(&priv->cgr_data.cgr);
        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
free_dpaa_bps:
        dpaa_bps_free(priv);
free_netdev:
        dev_set_drvdata(dev, NULL);
        free_netdev(net_dev);

        return err;
}

static int dpaa_remove(struct platform_device *pdev)
{
        struct net_device *net_dev;
        struct dpaa_priv *priv;
        struct device *dev;
        int err;

        dev = pdev->dev.parent;
        net_dev = dev_get_drvdata(dev);

        priv = netdev_priv(net_dev);

        dpaa_eth_sysfs_remove(dev);

        dev_set_drvdata(dev, NULL);
        unregister_netdev(net_dev);

        err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

        qman_delete_cgr_safe(&priv->ingress_cgr);
        qman_release_cgrid(priv->ingress_cgr.cgrid);
        qman_delete_cgr_safe(&priv->cgr_data.cgr);
        qman_release_cgrid(priv->cgr_data.cgr.cgrid);

        dpaa_napi_del(net_dev);

        dpaa_bps_free(priv);

        free_netdev(net_dev);

        return err;
}

static const struct platform_device_id dpaa_devtype[] = {
        {
                .name = "dpaa-ethernet",
                .driver_data = 0,
        }, {
        }
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
        },
        .id_table = dpaa_devtype,
        .probe = dpaa_eth_probe,
        .remove = dpaa_remove
};

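/* Module init: cache the FMan-wide parameters (extra Rx headroom and maximum
 * frame size) before registering the platform driver, so per-interface
 * probing can rely on them.
 */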
static int __init dpaa_load(void)
{
        int err;

        pr_debug("FSL DPAA Ethernet driver\n");

        /* Initialize the dpaa_eth mirrors of the FMan globals */
        dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
        dpaa_max_frm = fman_get_max_frm();

        err = platform_driver_register(&dpaa_driver);
        if (err < 0)
                pr_err("Error, platform_driver_register() = %d\n", err);

        return err;
}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{
        platform_driver_unregister(&dpaa_driver);

        /* Only one channel is used, and it must be released only after all
         * the interfaces have been removed.
         */
        dpaa_release_channel();
}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");